diff --git a/substrate/.gitlab-ci.yml b/substrate/.gitlab-ci.yml
index 1ff57bf0f79ab2dddda066b779e7fe650a71dc9a..2cef2d8badcc99b481366bf0273b24c6add50739 100644
--- a/substrate/.gitlab-ci.yml
+++ b/substrate/.gitlab-ci.yml
@@ -233,6 +233,14 @@ cargo-deny:
   # FIXME: Temporarily allow to fail.
   allow_failure: true
 
+cargo-fmt:
+  stage: test
+  <<: *docker-env
+  <<: *test-refs
+  script:
+    - cargo +nightly fmt --all -- --check
+  allow_failure: true
+
 cargo-check-benches:
   stage: test
   <<: *docker-env
diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs
index 64d8f75b00d23b2665f2c8dec672f16efc47e146..2f1fa742f078be95f8dcb673f9319db21fecad61 100644
--- a/substrate/.maintain/frame-weight-template.hbs
+++ b/substrate/.maintain/frame-weight-template.hbs
@@ -26,6 +26,7 @@
 // {{arg}}
 {{/each}}
 
+#![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
 
diff --git a/substrate/bin/node-template/node/src/chain_spec.rs b/substrate/bin/node-template/node/src/chain_spec.rs
index 5093a77b571e9b711124d66c77d9c7f7cfc2ca00..7009b3be5c27964ecec9ed3af6808e3b15de7b45 100644
--- a/substrate/bin/node-template/node/src/chain_spec.rs
+++ b/substrate/bin/node-template/node/src/chain_spec.rs
@@ -1,12 +1,12 @@
-use sp_core::{Pair, Public, sr25519};
 use node_template_runtime::{
-	AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig,
-	SudoConfig, SystemConfig, WASM_BINARY, Signature
+	AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, Signature, SudoConfig,
+	SystemConfig, WASM_BINARY,
 };
+use sc_service::ChainType;
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+use sp_core::{sr25519, Pair, Public};
 use sp_finality_grandpa::AuthorityId as GrandpaId;
-use sp_runtime::traits::{Verify, IdentifyAccount};
-use sc_service::ChainType;
+use sp_runtime::traits::{IdentifyAccount, Verify};
 
 // The URL for the telemetry server.
 // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
@@ -24,18 +24,16 @@ pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Pu
 type AccountPublic = <Signature as Verify>::Signer;
 
 /// Generate an account ID from seed.
-pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where
-	AccountPublic: From<<TPublic::Pair as Pair>::Public>
+pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
+where
+	AccountPublic: From<<TPublic::Pair as Pair>::Public>,
 {
 	AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
 }
 
 /// Generate an Aura authority key.
 pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) {
-	(
-		get_from_seed::<AuraId>(s),
-		get_from_seed::<GrandpaId>(s),
-	)
+	(get_from_seed::<AuraId>(s), get_from_seed::<GrandpaId>(s))
 }
 
 pub fn development_config() -> Result<ChainSpec, String> {
@@ -47,23 +45,23 @@ pub fn development_config() -> Result<ChainSpec, String> {
 		// ID
 		"dev",
 		ChainType::Development,
-		move || testnet_genesis(
-			wasm_binary,
-			// Initial PoA authorities
-			vec![
-				authority_keys_from_seed("Alice"),
-			],
-			// Sudo account
-			get_account_id_from_seed::<sr25519::Public>("Alice"),
-			// Pre-funded accounts
-			vec![
+		move || {
+			testnet_genesis(
+				wasm_binary,
+				// Initial PoA authorities
+				vec![authority_keys_from_seed("Alice")],
+				// Sudo account
 				get_account_id_from_seed::<sr25519::Public>("Alice"),
-				get_account_id_from_seed::<sr25519::Public>("Bob"),
-				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
-			],
-			true,
-		),
+				// Pre-funded accounts
+				vec![
+					get_account_id_from_seed::<sr25519::Public>("Alice"),
+					get_account_id_from_seed::<sr25519::Public>("Bob"),
+					get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+				],
+				true,
+			)
+		},
 		// Bootnodes
 		vec![],
 		// Telemetry
@@ -86,32 +84,31 @@ pub fn local_testnet_config() -> Result<ChainSpec, String> {
 		// ID
 		"local_testnet",
 		ChainType::Local,
-		move || testnet_genesis(
-			wasm_binary,
-			// Initial PoA authorities
-			vec![
-				authority_keys_from_seed("Alice"),
-				authority_keys_from_seed("Bob"),
-			],
-			// Sudo account
-			get_account_id_from_seed::<sr25519::Public>("Alice"),
-			// Pre-funded accounts
-			vec![
+		move || {
+			testnet_genesis(
+				wasm_binary,
+				// Initial PoA authorities
+				vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")],
+				// Sudo account
 				get_account_id_from_seed::<sr25519::Public>("Alice"),
-				get_account_id_from_seed::<sr25519::Public>("Bob"),
-				get_account_id_from_seed::<sr25519::Public>("Charlie"),
-				get_account_id_from_seed::<sr25519::Public>("Dave"),
-				get_account_id_from_seed::<sr25519::Public>("Eve"),
-				get_account_id_from_seed::<sr25519::Public>("Ferdie"),
-				get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
-				get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
-			],
-			true,
-		),
+				// Pre-funded accounts
+				vec![
+					get_account_id_from_seed::<sr25519::Public>("Alice"),
+					get_account_id_from_seed::<sr25519::Public>("Bob"),
+					get_account_id_from_seed::<sr25519::Public>("Charlie"),
+					get_account_id_from_seed::<sr25519::Public>("Dave"),
+					get_account_id_from_seed::<sr25519::Public>("Eve"),
+					get_account_id_from_seed::<sr25519::Public>("Ferdie"),
+					get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
+				],
+				true,
+			)
+		},
 		// Bootnodes
 		vec![],
 		// Telemetry
@@ -141,7 +138,7 @@ fn testnet_genesis(
 		},
 		balances: BalancesConfig {
 			// Configure endowed accounts with initial balance of 1 << 60.
-			balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(),
+			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
 		},
 		aura: AuraConfig {
 			authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(),
diff --git a/substrate/bin/node-template/node/src/cli.rs b/substrate/bin/node-template/node/src/cli.rs
index 947123a6bbf5b927edc33c4a3d6d370cee359d28..8b551051c1b19bd0763680433afe9f25673dcfbf 100644
--- a/substrate/bin/node-template/node/src/cli.rs
+++ b/substrate/bin/node-template/node/src/cli.rs
@@ -1,5 +1,5 @@
-use structopt::StructOpt;
 use sc_cli::RunCmd;
+use structopt::StructOpt;
 
 #[derive(Debug, StructOpt)]
 pub struct Cli {
diff --git a/substrate/bin/node-template/node/src/command.rs b/substrate/bin/node-template/node/src/command.rs
index e61dd864188253ed7510868162d6077c0db953e9..d3a04e0ae91e815a749761133b0630419d6cfe8f 100644
--- a/substrate/bin/node-template/node/src/command.rs
+++ b/substrate/bin/node-template/node/src/command.rs
@@ -15,11 +15,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use crate::{chain_spec, service};
-use crate::cli::{Cli, Subcommand};
-use sc_cli::{SubstrateCli, RuntimeVersion, Role, ChainSpec};
-use sc_service::PartialComponents;
+use crate::{
+	chain_spec,
+	cli::{Cli, Subcommand},
+	service,
+};
 use node_template_runtime::Block;
+use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
+use sc_service::PartialComponents;
 
 impl SubstrateCli for Cli {
 	fn impl_name() -> String {
@@ -50,9 +53,8 @@ impl SubstrateCli for Cli {
 		Ok(match id {
 			"dev" => Box::new(chain_spec::development_config()?),
 			"" | "local" => Box::new(chain_spec::local_testnet_config()?),
-			path => Box::new(chain_spec::ChainSpec::from_json_file(
-				std::path::PathBuf::from(path),
-			)?),
+			path =>
+				Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
 		})
 	}
 
@@ -74,32 +76,30 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, import_queue, ..}
-					= service::new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					service::new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, ..}
-					= service::new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
 		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, ..}
-					= service::new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
 		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, import_queue, ..}
-					= service::new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					service::new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
 		},
@@ -110,29 +110,30 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents { client, task_manager, backend, ..}
-					= service::new_partial(&config)?;
+				let PartialComponents { client, task_manager, backend, .. } =
+					service::new_partial(&config)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
 		},
-		Some(Subcommand::Benchmark(cmd)) => {
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 				runner.sync_run(|config| cmd.run::<Block, service::Executor>(config))
 			} else {
 				Err("Benchmarking wasn't enabled when building the node. \
-				You can enable it with `--features runtime-benchmarks`.".into())
-			}
-		},
+				You can enable it with `--features runtime-benchmarks`."
+					.into())
+			},
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
 				match config.role {
 					Role::Light => service::new_light(config),
 					_ => service::new_full(config),
-				}.map_err(sc_cli::Error::Service)
+				}
+				.map_err(sc_cli::Error::Service)
 			})
-		}
+		},
 	}
 }
diff --git a/substrate/bin/node-template/node/src/lib.rs b/substrate/bin/node-template/node/src/lib.rs
index 777c4f0a77147b76f2d86c7318d6ca64a7323dfa..f117b8aae61929ebf8b193284f751b93c5868002 100644
--- a/substrate/bin/node-template/node/src/lib.rs
+++ b/substrate/bin/node-template/node/src/lib.rs
@@ -1,3 +1,3 @@
 pub mod chain_spec;
-pub mod service;
 pub mod rpc;
+pub mod service;
diff --git a/substrate/bin/node-template/node/src/rpc.rs b/substrate/bin/node-template/node/src/rpc.rs
index a03d1aad2a8838d38bd0ff1133d641a808486aa0..d23b23178ec2a6084100e8dbba7d9ef02b8f3874 100644
--- a/substrate/bin/node-template/node/src/rpc.rs
+++ b/substrate/bin/node-template/node/src/rpc.rs
@@ -8,12 +8,11 @@
 use std::sync::Arc;
 
 use node_template_runtime::{opaque::Block, AccountId, Balance, Index};
-use sp_api::ProvideRuntimeApi;
-use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend};
-use sp_block_builder::BlockBuilder;
 pub use sc_rpc_api::DenyUnsafe;
 use sc_transaction_pool_api::TransactionPool;
-
+use sp_api::ProvideRuntimeApi;
+use sp_block_builder::BlockBuilder;
+use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
 
 /// Full client dependencies.
 pub struct FullDeps<C, P> {
@@ -26,34 +25,25 @@ pub struct FullDeps<C, P> {
 }
 
 /// Instantiate all full RPC extensions.
-pub fn create_full<C, P>(
-	deps: FullDeps<C, P>,
-) -> jsonrpc_core::IoHandler<sc_rpc::Metadata> where
+pub fn create_full<C, P>(deps: FullDeps<C, P>) -> jsonrpc_core::IoHandler<sc_rpc::Metadata>
+where
 	C: ProvideRuntimeApi<Block>,
-	C: HeaderBackend<Block> + HeaderMetadata<Block, Error=BlockChainError> + 'static,
+	C: HeaderBackend<Block> + HeaderMetadata<Block, Error = BlockChainError> + 'static,
 	C: Send + Sync + 'static,
 	C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Index>,
 	C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
 	C::Api: BlockBuilder<Block>,
 	P: TransactionPool + 'static,
 {
-	use substrate_frame_rpc_system::{FullSystem, SystemApi};
 	use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi};
+	use substrate_frame_rpc_system::{FullSystem, SystemApi};
 
 	let mut io = jsonrpc_core::IoHandler::default();
-	let FullDeps {
-		client,
-		pool,
-		deny_unsafe,
-	} = deps;
+	let FullDeps { client, pool, deny_unsafe } = deps;
 
-	io.extend_with(
-		SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))
-	);
+	io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)));
 
-	io.extend_with(
-		TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone()))
-	);
+	io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())));
 
 	// Extend this RPC with a custom API by using the following syntax.
 	// `YourRpcStruct` should have a reference to a client, which is needed
diff --git a/substrate/bin/node-template/node/src/service.rs b/substrate/bin/node-template/node/src/service.rs
index c19824e9eaa38a87e89b03cd366ed9bb54182ec2..d97f29c00bca43d7b7f33c5ea0190b3078718774 100644
--- a/substrate/bin/node-template/node/src/service.rs
+++ b/substrate/bin/node-template/node/src/service.rs
@@ -1,17 +1,17 @@
 //! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
 
-use std::{sync::Arc, time::Duration};
-use sc_client_api::{ExecutorProvider, RemoteBackend};
 use node_template_runtime::{self, opaque::Block, RuntimeApi};
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_client_api::{ExecutorProvider, RemoteBackend};
+use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
 use sc_executor::native_executor_instance;
 pub use sc_executor::NativeExecutor;
-use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
-use sc_consensus_aura::{ImportQueueParams, StartAuraParams, SlotProportion};
 use sc_finality_grandpa::SharedVoterState;
 use sc_keystore::LocalKeystore;
+use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
 use sp_consensus::SlotData;
+use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
+use std::{sync::Arc, time::Duration};
 
 // Our native executor instance.
 native_executor_instance!(
@@ -25,22 +25,35 @@ type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
 
-pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponents<
-	FullClient, FullBackend, FullSelectChain,
-	sp_consensus::DefaultImportQueue<Block, FullClient>,
-	sc_transaction_pool::FullPool<Block, FullClient>,
-	(
-		sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
-		sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
-		Option<Telemetry>,
-	)
->, ServiceError> {
+pub fn new_partial(
+	config: &Configuration,
+) -> Result<
+	sc_service::PartialComponents<
+		FullClient,
+		FullBackend,
+		FullSelectChain,
+		sp_consensus::DefaultImportQueue<Block, FullClient>,
+		sc_transaction_pool::FullPool<Block, FullClient>,
+		(
+			sc_finality_grandpa::GrandpaBlockImport<
+				FullBackend,
+				Block,
+				FullClient,
+				FullSelectChain,
+			>,
+			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+			Option<Telemetry>,
+		),
+	>,
+	ServiceError,
+> {
 	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other(
-			format!("Remote Keystores are not supported.")))
+		return Err(ServiceError::Other(format!("Remote Keystores are not supported.")))
 	}
 
-	let telemetry = config.telemetry_endpoints.clone()
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			let worker = TelemetryWorker::new(16)?;
@@ -56,11 +69,10 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponents<
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _>(
-		ImportQueueParams {
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _>(ImportQueueParams {
 			block_import: grandpa_block_import.clone(),
 			justification_import: Some(Box::new(grandpa_block_import.clone())),
 			client: client.clone(),
@@ -98,12 +110,13 @@ pub fn new_partial(config: &Configuration) -> Result<sc_service::PartialComponents<
 	if let Some(url) = &config.keystore_remote {
 		match remote_keystore(url) {
 			Ok(k) => keystore_container.set_remote_keystore(k),
-			Err(e) => {
-				return Err(ServiceError::Other(
-					format!("Error hooking up remote keystore for {}: {}", url, e)))
-			}
+			Err(e) =>
+				return Err(ServiceError::Other(format!(
+					"Error hooking up remote keystore for {}: {}",
+					url, e
+				))),
 		};
 	}
 
@@ -162,7 +176,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 	if config.offchain_worker.enabled {
 		sc_service::build_offchain_workers(
-			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
 		);
 	}
 
@@ -178,32 +195,27 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 		let pool = transaction_pool.clone();
 
 		Box::new(move |deny_unsafe, _| {
-			let deps = crate::rpc::FullDeps {
-				client: client.clone(),
-				pool: pool.clone(),
-				deny_unsafe,
-			};
+			let deps =
+				crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };
 
 			crate::rpc::create_full(deps)
 		})
 	};
 
-	let _rpc_handlers = sc_service::spawn_tasks(
-		sc_service::SpawnTasksParams {
-			network: network.clone(),
-			client: client.clone(),
-			keystore: keystore_container.sync_keystore(),
-			task_manager: &mut task_manager,
-			transaction_pool: transaction_pool.clone(),
-			rpc_extensions_builder,
-			on_demand: None,
-			remote_blockchain: None,
-			backend,
-			system_rpc_tx,
-			config,
-			telemetry: telemetry.as_mut(),
-		},
-	)?;
+	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		network: network.clone(),
+		client: client.clone(),
+		keystore: keystore_container.sync_keystore(),
+		task_manager: &mut task_manager,
+		transaction_pool: transaction_pool.clone(),
+		rpc_extensions_builder,
+		on_demand: None,
+		remote_blockchain: None,
+		backend,
+		system_rpc_tx,
+		config,
+		telemetry: telemetry.as_mut(),
+	})?;
 
 	if role.is_authority() {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
@@ -257,11 +269,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
+	let keystore =
+		if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
 
 	let grandpa_config = sc_finality_grandpa::Config {
 		// FIXME #1578 make this available through chainspec
@@ -295,7 +304,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 		// if it fails we take down the service with it.
 		task_manager.spawn_essential_handle().spawn_blocking(
 			"grandpa-voter",
-			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
 		);
 	}
 
@@ -305,7 +314,9 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 
 /// Builds a new service for a light client.
 pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
-	let telemetry = config.telemetry_endpoints.clone()
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			let worker = TelemetryWorker::new(16)?;
@@ -320,11 +331,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
 	)?;
 
-	let mut telemetry = telemetry
-		.map(|(worker, telemetry)| {
-			task_manager.spawn_handle().spawn("telemetry", worker.run());
-			telemetry
-		});
+	let mut telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
 
 	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
 
@@ -347,8 +357,8 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 
 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _>(
-		ImportQueueParams {
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _>(ImportQueueParams {
 			block_import: grandpa_block_import.clone(),
 			justification_import: Some(Box::new(grandpa_block_import.clone())),
 			client: client.clone(),
@@ -368,8 +378,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 			registry: config.prometheus_registry(),
 			check_for_equivocation: Default::default(),
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		},
-	)?;
+		})?;
 
 	let (network, system_rpc_tx, network_starter) =
 		sc_service::build_network(sc_service::BuildNetworkParams {
@@ -384,7 +393,10 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
 
 	if config.offchain_worker.enabled {
 		sc_service::build_offchain_workers(
-			&config, task_manager.spawn_handle(), client.clone(), network.clone(),
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
 		);
 	}
 
diff --git a/substrate/bin/node-template/pallets/template/src/benchmarking.rs b/substrate/bin/node-template/pallets/template/src/benchmarking.rs
index 93d7fa395ad6b4b6d8bbc881bde57e6f82d69910..2117c048cfbdbbd7fc56489958e9a6540f1303f4 100644
--- a/substrate/bin/node-template/pallets/template/src/benchmarking.rs
+++ b/substrate/bin/node-template/pallets/template/src/benchmarking.rs
@@ -2,10 +2,10 @@
 
 use super::*;
 
-use frame_system::RawOrigin;
-use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite};
 #[allow(unused)]
 use crate::Pallet as Template;
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_system::RawOrigin;
 
 benchmarks! {
 	do_something {
@@ -17,8 +17,4 @@ benchmarks! {
 	}
 }
 
-impl_benchmark_test_suite!(
-	Template,
-	crate::mock::new_test_ext(),
-	crate::mock::Test,
-);
+impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test);
diff --git a/substrate/bin/node-template/pallets/template/src/lib.rs b/substrate/bin/node-template/pallets/template/src/lib.rs
index 373a56f44419e700fe8214d8de03169c7068a1fb..7a9830a21eb2adf1512ead6576b3d798eb00389a 100644
--- a/substrate/bin/node-template/pallets/template/src/lib.rs
+++ b/substrate/bin/node-template/pallets/template/src/lib.rs
@@ -3,7 +3,6 @@
 /// Edit this file to define custom logic or remove it if it is not needed.
 /// Learn more about FRAME and the core library of Substrate FRAME pallets:
 /// <https://substrate.dev/docs/en/knowledgebase/runtime/frame>
-
 pub use pallet::*;
 
 #[cfg(test)]
@@ -63,7 +62,7 @@ pub mod pallet {
 	// These functions materialize as "extrinsics", which are often compared to transactions.
 	// Dispatchable functions must be annotated with a weight and must return a DispatchResult.
 	#[pallet::call]
-	impl<T:Config> Pallet<T> {
+	impl<T: Config> Pallet<T> {
 		/// An example dispatchable that takes a single value as a parameter, writes the value to
 		/// storage and emits an event. This function must be dispatched by a signed extrinsic.
 		#[pallet::weight(10_000 + T::DbWeight::get().writes(1))]
diff --git a/substrate/bin/node-template/pallets/template/src/mock.rs b/substrate/bin/node-template/pallets/template/src/mock.rs
index 9bea61df22edbc5bc58b8877637d8ccd4e821680..76742477000fb8f90fece1166fd368d137f7b325 100644
--- a/substrate/bin/node-template/pallets/template/src/mock.rs
+++ b/substrate/bin/node-template/pallets/template/src/mock.rs
@@ -1,10 +1,11 @@
 use crate as pallet_template;
-use sp_core::H256;
 use frame_support::parameter_types;
+use frame_system as system;
+use sp_core::H256;
 use sp_runtime::{
-	traits::{BlakeTwo256, IdentityLookup}, testing::Header,
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
 };
-use frame_system as system;
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
diff --git a/substrate/bin/node-template/pallets/template/src/tests.rs b/substrate/bin/node-template/pallets/template/src/tests.rs
index 3356b29ff35980d08d751b4eca01aac403df8380..220565860172103178ad88948d6dee492155a6dd 100644
--- a/substrate/bin/node-template/pallets/template/src/tests.rs
+++ b/substrate/bin/node-template/pallets/template/src/tests.rs
@@ -1,5 +1,5 @@
-use crate::{Error, mock::*};
-use frame_support::{assert_ok, assert_noop};
+use crate::{mock::*, Error};
+use frame_support::{assert_noop, assert_ok};
 
 #[test]
 fn it_works_for_default_value() {
@@ -15,9 +15,6 @@ fn it_works_for_default_value() {
 fn correct_error_for_none_value() {
 	new_test_ext().execute_with(|| {
 		// Ensure the expected error is thrown when no value is present.
-		assert_noop!(
-			TemplateModule::cause_error(Origin::signed(1)),
-			Error::<Test>::NoneValue
-		);
+		assert_noop!(TemplateModule::cause_error(Origin::signed(1)), Error::<Test>::NoneValue);
 	});
 }
diff --git a/substrate/bin/node-template/runtime/src/lib.rs b/substrate/bin/node-template/runtime/src/lib.rs
index c92eb8a1aadf82c85d1f7574597cdc9d71299c4c..f9eaa96153eba5ff212a528d590f3c1895c4b9bf 100644
--- a/substrate/bin/node-template/runtime/src/lib.rs
+++ b/substrate/bin/node-template/runtime/src/lib.rs
@@ -1,43 +1,44 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
-#![recursion_limit="256"]
+#![recursion_limit = "256"]
 
 // Make the WASM binary available.
 #[cfg(feature = "std")]
 include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
 
-use sp_std::prelude::*;
-use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
-use sp_runtime::{
-	ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature,
-	transaction_validity::{TransactionValidity, TransactionSource},
-};
-use sp_runtime::traits::{
-	BlakeTwo256, Block as BlockT, AccountIdLookup, Verify, IdentifyAccount, NumberFor,
+use pallet_grandpa::{
+	fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
 };
 use sp_api::impl_runtime_apis;
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
-use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
-use pallet_grandpa::fg_primitives;
-use sp_version::RuntimeVersion;
+use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
+use sp_runtime::{
+	create_runtime_str, generic, impl_opaque_keys,
+	traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify},
+	transaction_validity::{TransactionSource, TransactionValidity},
+	ApplyExtrinsicResult, MultiSignature,
+};
+use sp_std::prelude::*;
 #[cfg(feature = "std")]
 use sp_version::NativeVersion;
+use sp_version::RuntimeVersion;
 
 // A few exports that help ease life for downstream crates.
-#[cfg(any(feature = "std", test))]
-pub use sp_runtime::BuildStorage;
-pub use pallet_timestamp::Call as TimestampCall;
-pub use pallet_balances::Call as BalancesCall;
-pub use sp_runtime::{Permill, Perbill};
 pub use frame_support::{
-	construct_runtime, parameter_types, StorageValue,
+	construct_runtime, parameter_types,
 	traits::{KeyOwnerProofSystem, Randomness, StorageInfo},
 	weights::{
-		Weight, IdentityFee,
 		constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
+		IdentityFee, Weight,
 	},
+	StorageValue,
 };
+pub use pallet_balances::Call as BalancesCall;
+pub use pallet_timestamp::Call as TimestampCall;
 use pallet_transaction_payment::CurrencyAdapter;
+#[cfg(any(feature = "std", test))]
+pub use sp_runtime::BuildStorage;
+pub use sp_runtime::{Perbill, Permill};
 
 /// Import the template pallet.
 pub use pallet_template;
@@ -123,10 +124,7 @@ pub const DAYS: BlockNumber = HOURS * 24;
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }
 
 const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
@@ -306,7 +304,7 @@ pub type SignedExtra = (
 	frame_system::CheckEra<Runtime>,
 	frame_system::CheckNonce<Runtime>,
 	frame_system::CheckWeight<Runtime>,
-	pallet_transaction_payment::ChargeTransactionPayment<Runtime>
+	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
 );
 /// Unchecked extrinsic type as expected by this runtime.
 pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
diff --git a/substrate/bin/node/bench/src/construct.rs b/substrate/bin/node/bench/src/construct.rs
index 491b261518a4fd58c656443af695ed094df8264a..eeeb833c1ff123e0b8f5842fc1298631665ff238 100644
--- a/substrate/bin/node/bench/src/construct.rs
+++ b/substrate/bin/node/bench/src/construct.rs
@@ -24,36 +24,22 @@
 //! DO NOT depend on user input). Thus transaction generation should be
 //! based on randomized data.
-use std::{
-	borrow::Cow,
-	collections::HashMap,
-	pin::Pin,
-	sync::Arc,
-};
 use futures::Future;
+use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc};
 
 use node_primitives::Block;
-use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType};
-use sp_runtime::{
-	generic::BlockId,
-	traits::NumberFor,
-	OpaqueExtrinsic,
-};
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
 use sc_transaction_pool_api::{
-	ImportNotificationStream,
-	PoolFuture,
-	PoolStatus,
-	TransactionFor,
-	TransactionSource,
-	TransactionStatusStreamFor,
-	TxHash,
+	ImportNotificationStream, PoolFuture, PoolStatus, TransactionFor, TransactionSource,
+	TransactionStatusStreamFor, TxHash,
 };
 use sp_consensus::{Environment, Proposer};
 use sp_inherents::InherentDataProvider;
+use sp_runtime::{generic::BlockId, traits::NumberFor, OpaqueExtrinsic};
 
 use crate::{
 	common::SizeType,
-	core::{self, Path, Mode},
+	core::{self, Mode, Path},
 };
 
 pub struct ConstructionBenchmarkDescription {
@@ -72,7 +58,6 @@ pub struct ConstructionBenchmark {
 
 impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 	fn path(&self) -> Path {
-
 		let mut path = Path::new(&["node", "proposer"]);
 
 		match self.profile {
@@ -104,11 +89,7 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
 		let mut extrinsics: Vec<Arc<PoolTransaction>> = Vec::new();
 
-		let mut bench_db = BenchDb::with_key_types(
-			self.database_type,
-			50_000,
-			self.key_types
-		);
+		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
 
 		let client = bench_db.client();
 
@@ -127,11 +108,9 @@ impl core::BenchmarkDescription for ConstructionBenchmarkDescription {
 	fn name(&self) -> Cow<'static, str> {
 		format!(
 			"Block construction ({:?}/{}, {:?}, {:?} backend)",
-			self.block_type,
-			self.size,
-			self.profile,
-			self.database_type,
-		).into()
+			self.block_type, self.size, self.profile, self.database_type,
+		)
+		.into()
 	}
 }
 
@@ -139,7 +118,9 @@ impl core::Benchmark for ConstructionBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
 		let context = self.database.create_context(self.profile);
 
-		let _ = context.client.runtime_version_at(&BlockId::Number(0))
+		let _ = context
+			.client
+			.runtime_version_at(&BlockId::Number(0))
 			.expect("Failed to get runtime version")
 			.spec_version;
 
@@ -158,20 +139,25 @@ impl core::Benchmark for ConstructionBenchmark {
 
 		let start = std::time::Instant::now();
 
-		let proposer = futures::executor::block_on(proposer_factory.init(
-			&context.client.header(&BlockId::number(0))
-				.expect("Database error querying block #0")
-				.expect("Block #0 should exist"),
-		)).expect("Proposer initialization failed");
-
-		let _block = futures::executor::block_on(
-			proposer.propose(
-				timestamp_provider.create_inherent_data().expect("Create inherent data failed"),
-				Default::default(),
-				std::time::Duration::from_secs(20),
-				None,
+		let proposer = futures::executor::block_on(
+			proposer_factory.init(
+				&context
+					.client
+					.header(&BlockId::number(0))
+					.expect("Database error querying block #0")
+					.expect("Block #0 should exist"),
 			),
-		).map(|r| r.block).expect("Proposing failed");
+		)
+		.expect("Proposer initialization failed");
+
+		let _block = futures::executor::block_on(proposer.propose(
+			timestamp_provider.create_inherent_data().expect("Create inherent data failed"),
+			Default::default(),
+			std::time::Duration::from_secs(20),
+			None,
+		))
+		.map(|r| r.block)
+		.expect("Proposing failed");
 
 		let elapsed = start.elapsed();
 
@@ -191,10 +177,7 @@ pub struct PoolTransaction {
 
 impl From<OpaqueExtrinsic> for PoolTransaction {
 	fn from(e: OpaqueExtrinsic) -> Self {
-		PoolTransaction {
-			data: e,
-			hash: node_primitives::Hash::zero(),
-		}
+		PoolTransaction { data: e, hash: node_primitives::Hash::zero() }
 	}
 }
 
@@ -210,15 +193,25 @@ impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction {
 		&self.hash
 	}
 
-	fn priority(&self) -> &u64 { unimplemented!() }
+	fn priority(&self) -> &u64 {
+		unimplemented!()
+	}
 
-	fn longevity(&self) -> &u64 { unimplemented!() }
+	fn longevity(&self) -> &u64 {
+		unimplemented!()
+	}
 
-	fn requires(&self) -> &[Vec<u8>] { unimplemented!() }
+	fn requires(&self) -> &[Vec<u8>] {
+		unimplemented!()
+	}
 
-	fn provides(&self) -> &[Vec<u8>] { unimplemented!() }
+	fn provides(&self) -> &[Vec<u8>] {
+		unimplemented!()
+	}
 
-	fn is_propagable(&self) -> bool { unimplemented!() }
+	fn is_propagable(&self) -> bool {
+		unimplemented!()
+	}
 }
 
 #[derive(Clone, Debug)]
@@ -236,7 +229,7 @@ impl sc_transaction_pool_api::TransactionPool for Transactions {
 		_at: &BlockId<Self::Block>,
 		_source: TransactionSource,
 		_xts: Vec<TransactionFor<Self>>,
-	) -> PoolFuture<Vec<Result<TxHash<Self>, Self::Error>>, Self::Error> {
+	) -> PoolFuture<Vec<Result<TxHash<Self>, Self::Error>>, Self::Error> {
 		unimplemented!()
 	}
 
@@ -259,14 +252,21 @@ impl sc_transaction_pool_api::TransactionPool for Transactions {
 		unimplemented!()
 	}
 
-	fn ready_at(&self, _at: NumberFor<Self::Block>)
-		-> Pin<Box<dyn Future<Output=Box<dyn Iterator<Item=Arc<Self::InPoolTransaction>> + Send>> + Send>>
-	{
-		let iter: Box<dyn Iterator<Item=Arc<PoolTransaction>> + Send> = Box::new(self.0.clone().into_iter());
+	fn ready_at(
+		&self,
+		_at: NumberFor<Self::Block>,
+	) -> Pin<
+		Box<
+			dyn Future<Output = Box<dyn Iterator<Item = Arc<Self::InPoolTransaction>> + Send>>
+				+ Send,
+		>,
+	> {
+		let iter: Box<dyn Iterator<Item = Arc<PoolTransaction>> + Send> =
+			Box::new(self.0.clone().into_iter());
 		Box::pin(futures::future::ready(iter))
 	}
 
-	fn ready(&self) -> Box<dyn Iterator<Item=Arc<Self::InPoolTransaction>> + Send> {
+	fn ready(&self) -> Box<dyn Iterator<Item = Arc<Self::InPoolTransaction>> + Send> {
 		unimplemented!()
 	}
 
diff --git a/substrate/bin/node/bench/src/core.rs b/substrate/bin/node/bench/src/core.rs
index 26b7f92b1448376fe3d0a2c9553abd568189358f..56c0f3526a4dcecfdd49963194e8016d5cf5385c 100644
--- a/substrate/bin/node/bench/src/core.rs
+++ b/substrate/bin/node/bench/src/core.rs
@@ -16,8 +16,11 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-use std::{fmt, borrow::{Cow, ToOwned}};
 use serde::Serialize;
+use std::{
+	borrow::{Cow, ToOwned},
+	fmt,
+};
 
 pub struct Path(Vec<String>);
 
@@ -33,7 +36,11 @@ impl Path {
 	}
 
 	pub fn full(&self) -> String {
-		self.0.iter().fold(String::new(), |mut val, next| { val.push_str("::"); val.push_str(next); val })
+		self.0.iter().fold(String::new(), |mut val, next| {
+			val.push_str("::");
+			val.push_str(next);
+			val
+		})
 	}
 
 	pub fn has(&self, path: &str) -> bool {
@@ -115,10 +122,7 @@ impl fmt::Display for BenchmarkOutput {
 	}
 }
 
-pub fn run_benchmark(
-	benchmark: Box<dyn BenchmarkDescription>,
-	mode: Mode,
-) -> BenchmarkOutput {
+pub fn run_benchmark(benchmark: Box<dyn BenchmarkDescription>, mode: Mode) -> BenchmarkOutput {
 	let name = benchmark.name().to_owned();
 	let mut benchmark = benchmark.setup();
 
@@ -133,11 +137,7 @@ pub fn run_benchmark(
 	let raw_average = (durations.iter().sum::<u128>() / (durations.len() as u128)) as u64;
 	let average = (durations.iter().skip(10).take(30).sum::<u128>() / 30) as u64;
 
-	BenchmarkOutput {
-		name: name.into(),
-		raw_average,
-		average,
-	}
+	BenchmarkOutput { name: name.into(), raw_average, average }
 }
 
 macro_rules! matrix(
diff --git a/substrate/bin/node/bench/src/generator.rs b/substrate/bin/node/bench/src/generator.rs
index c540ae147c9f0f59c13787f3b9233ba4c0abc042..e3aa1192b5d1fa7983d94ebd78e7ba02db0886a6 100644
--- a/substrate/bin/node/bench/src/generator.rs
+++ b/substrate/bin/node/bench/src/generator.rs
@@ -30,14 +30,15 @@ use crate::simple_trie::SimpleTrie;
 /// return root.
 pub fn generate_trie(
 	db: Arc<dyn KeyValueDB>,
-	key_values: impl IntoIterator<Item=(Vec<u8>, Vec<u8>)>,
+	key_values: impl IntoIterator<Item = (Vec<u8>, Vec<u8>)>,
 ) -> Hash {
 	let mut root = Hash::default();
 	let (db, overlay) = {
 		let mut overlay = HashMap::new();
 		overlay.insert(
-			hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").expect("null key is valid"),
+			hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314")
+				.expect("null key is valid"),
 			Some(vec![0]),
 		);
 		let mut trie = SimpleTrie { db, overlay: &mut overlay };
@@ -50,7 +51,7 @@ pub fn generate_trie(
 			trie_db.commit();
 		}
 
-		( trie.db, overlay )
+		(trie.db, overlay)
 	};
 
 	let mut transaction = db.transaction();
diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs
index b4fee58dac0252332b6b1d72aeb80d05695a092a..a4056b49f7f4418a2e47db2f2d4b7729b5245b09 100644
--- a/substrate/bin/node/bench/src/import.rs
+++ b/substrate/bin/node/bench/src/import.rs
@@ -32,15 +32,15 @@
 
 use std::borrow::Cow;
 
-use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType};
 use node_primitives::Block;
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
 use sc_client_api::backend::Backend;
 use sp_runtime::generic::BlockId;
 use sp_state_machine::InspectState;
 
 use crate::{
 	common::SizeType,
-	core::{self, Path, Mode},
+	core::{self, Mode, Path},
 };
 
 pub struct ImportBenchmarkDescription {
@@ -60,7 +60,6 @@ pub struct ImportBenchmark {
 
 impl core::BenchmarkDescription for ImportBenchmarkDescription {
 	fn path(&self) -> Path {
-
 		let mut path = Path::new(&["node", "import"]);
 
 		match self.profile {
@@ -91,11 +90,7 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription {
 	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
 		let profile = self.profile;
-		let mut bench_db = BenchDb::with_key_types(
-			self.database_type,
-			50_000,
-			self.key_types
-		);
+		let mut bench_db = BenchDb::with_key_types(self.database_type, 50_000, self.key_types);
 		let block = bench_db.generate_block(self.block_type.to_content(self.size.transactions()));
 		Box::new(ImportBenchmark {
 			database: bench_db,
@@ -108,11 +103,9 @@ impl core::BenchmarkDescription for ImportBenchmarkDescription {
 	fn name(&self) -> Cow<'static, str> {
 		format!(
 			"Block import ({:?}/{}, {:?}, {:?} backend)",
-			self.block_type,
-			self.size,
-			self.profile,
-			self.database_type,
-		).into()
+			self.block_type, self.size, self.profile, self.database_type,
+		)
+		.into()
 	}
 }
 
@@ -120,7 +113,9 @@ impl core::Benchmark for ImportBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
 		let mut context = self.database.create_context(self.profile);
 
-		let _ = context.client.runtime_version_at(&BlockId::Number(0))
+		let _ = context
+			.client
+			.runtime_version_at(&BlockId::Number(0))
 			.expect("Failed to get runtime version")
 			.spec_version;
 
@@ -133,7 +128,8 @@ impl core::Benchmark for ImportBenchmark {
 		let elapsed = start.elapsed();
 
 		// Sanity checks.
-		context.client
+		context
+			.client
 			.state_at(&BlockId::number(1))
 			.expect("state_at failed for block#1")
 			.inspect_state(|| {
@@ -155,19 +151,17 @@ impl core::Benchmark for ImportBenchmark {
 					BlockType::Noop => {
 						assert_eq!(
 							node_runtime::System::events().len(),
-
 							// should be 2 per signed extrinsic + 1 per unsigned
 							// we have 1 unsigned and the rest are signed in the block
 							// those 2 events per signed are:
 							// - deposit event for charging transaction fee
 							// - extrinsic success
-							(self.block.extrinsics.len() - 1) * 2 + 1,
+							(self.block.extrinsics.len() - 1) * 2 + 1,
 						);
 					},
 					_ => {},
 				}
-			}
-		);
+			});
 
 		if mode == Mode::Profile {
 			std::thread::park_timeout(std::time::Duration::from_secs(1));
diff --git a/substrate/bin/node/bench/src/main.rs b/substrate/bin/node/bench/src/main.rs
index 40e9e1577777e81c8cbbadfa2c0e619639022a30..4b006b387d0ea3427ca8b292193acf19046ed469 100644
--- a/substrate/bin/node/bench/src/main.rs
+++ b/substrate/bin/node/bench/src/main.rs
@@ -18,9 +18,10 @@
 
 mod common;
 mod construct;
-#[macro_use] mod core;
-mod import;
+#[macro_use]
+mod core;
 mod generator;
+mod import;
 mod simple_trie;
 mod state_sizes;
 mod tempdb;
@@ -29,15 +30,15 @@
 
 use structopt::StructOpt;
 
-use node_testing::bench::{Profile, KeyTypes, BlockType, DatabaseType as BenchDataBaseType};
+use node_testing::bench::{BlockType, DatabaseType as BenchDataBaseType, KeyTypes, Profile};
 
 use crate::{
 	common::SizeType,
+	construct::ConstructionBenchmarkDescription,
 	core::{run_benchmark, Mode as BenchmarkMode},
-	tempdb::DatabaseType,
 	import::ImportBenchmarkDescription,
-	trie::{TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription, DatabaseSize},
-	construct::ConstructionBenchmarkDescription,
+	tempdb::DatabaseType,
+	trie::{DatabaseSize, TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription},
 	txpool::PoolBenchmarkDescription,
 };
 
@@ -92,14 +93,25 @@ fn main() {
 		SizeType::Large,
 		SizeType::Full,
 		SizeType::Custom(opt.transactions.unwrap_or(0)),
-	].iter() {
+	]
+	.iter()
+	{
 		for block_type in [
 			BlockType::RandomTransfersKeepAlive,
 			BlockType::RandomTransfersReaping,
 			BlockType::Noop,
-		].iter() {
-			for database_type in [BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb].iter() {
-				import_benchmarks.push((profile, size.clone(), block_type.clone(), database_type));
+		]
+		.iter()
+		{
+			for database_type in
+				[BenchDataBaseType::RocksDb, BenchDataBaseType::ParityDb].iter()
+			{
+				import_benchmarks.push((
+					profile,
+					size.clone(),
+					block_type.clone(),
+					database_type,
+				));
 			}
 		}
 	}
@@ -163,7 +175,7 @@ fn main() {
 				println!("{}: {}", benchmark.name(), benchmark.path().full())
 			}
 		}
-		return;
+		return
 	}
 
 	let mut results = Vec::new();
@@ -183,7 +195,8 @@ fn main() {
 	}
 
 	if opt.json {
-		let json_result: String = serde_json::to_string(&results).expect("Failed to construct json");
+		let json_result: String =
+			serde_json::to_string(&results).expect("Failed to construct json");
 		println!("{}", json_result);
 	}
 }
diff --git a/substrate/bin/node/bench/src/simple_trie.rs b/substrate/bin/node/bench/src/simple_trie.rs
index a29b51a38af58bb80343edf72988f73a5fb64a47..651772c71575f3e4cf19de2d4bd9ed48f8033b8b 100644
--- a/substrate/bin/node/bench/src/simple_trie.rs
+++ b/substrate/bin/node/bench/src/simple_trie.rs
@@ -18,10 +18,10 @@
 
 use std::{collections::HashMap, sync::Arc};
 
+use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix};
 use kvdb::KeyValueDB;
 use node_primitives::Hash;
 use sp_trie::DBValue;
-use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _};
 
 pub type Hasher = sp_core::Blake2Hasher;
 
@@ -32,7 +32,9 @@ pub struct SimpleTrie<'a> {
 }
 
 impl<'a> AsHashDB<Hasher, DBValue> for SimpleTrie<'a> {
-	fn as_hash_db(&self) -> &dyn hash_db::HashDB<Hasher, DBValue> { &*self }
+	fn as_hash_db(&self) -> &dyn hash_db::HashDB<Hasher, DBValue> {
+		&*self
+	}
 
 	fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB<Hasher, DBValue> + 'b) {
 		&mut *self
@@ -43,7 +45,7 @@ impl<'a> HashDB<Hasher, DBValue> for SimpleTrie<'a> {
 	fn get(&self, key: &Hash, prefix: Prefix) -> Option<DBValue> {
 		let key = sp_trie::prefixed_key::<Hasher>(key, prefix);
 		if let Some(value) = self.overlay.get(&key) {
-			return value.clone();
+			return value.clone()
 		}
 		self.db.get(0, &key).expect("Database backend error")
 	}
diff --git a/substrate/bin/node/bench/src/state_sizes.rs b/substrate/bin/node/bench/src/state_sizes.rs
index f9288c10548981d87f14eb8cabbd836ceb83e520..27112ed42d455c8534fbae8fd4f8f2914ee22ef7 100644
--- a/substrate/bin/node/bench/src/state_sizes.rs
+++ b/substrate/bin/node/bench/src/state_sizes.rs
@@ -17,7 +17,7 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 /// Kusama value size distribution
-pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[
+pub const KUSAMA_STATE_DISTRIBUTION: &'static [(u32, u32)] = &[
 	(32, 35),
 	(33, 20035),
 	(34, 5369),
diff --git a/substrate/bin/node/bench/src/tempdb.rs b/substrate/bin/node/bench/src/tempdb.rs
index 31ef71fba7b5e34215210b72a80458555a516bd0..3c1c0f250e49f54dc5f93d65294612bf6343a84d 100644
--- a/substrate/bin/node/bench/src/tempdb.rs
+++ b/substrate/bin/node/bench/src/tempdb.rs
@@ -16,9 +16,9 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
+use kvdb::{DBTransaction, KeyValueDB};
+use kvdb_rocksdb::{Database, DatabaseConfig};
 use std::{io, path::PathBuf, sync::Arc};
-use kvdb::{KeyValueDB, DBTransaction};
-use kvdb_rocksdb::{DatabaseConfig, Database};
 
 #[derive(Debug, Clone, Copy, derive_more::Display)]
 pub enum DatabaseType {
@@ -44,13 +44,14 @@ impl KeyValueDB for ParityDbWrapper {
 	/// Write a transaction of changes to the buffer.
 	fn write(&self, transaction: DBTransaction) -> io::Result<()> {
-		self.0.commit(
-			transaction.ops.iter().map(|op| match op {
-				kvdb::DBOp::Insert { col, key, value } => (*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
+		self.0
+			.commit(transaction.ops.iter().map(|op| match op {
+				kvdb::DBOp::Insert { col, key, value } =>
+					(*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
 				kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None),
-				kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!()
-			})
-		).expect("db error");
+				kvdb::DBOp::DeletePrefix { col: _, prefix: _ } => unimplemented!(),
+			}))
+			.expect("db error");
 		Ok(())
 	}
 
@@ -90,21 +91,19 @@ impl TempDatabase {
 		match db_type {
 			DatabaseType::RocksDb => {
 				let db_cfg = DatabaseConfig::with_columns(1);
-				let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()).expect("Database backend error");
+				let db = Database::open(&db_cfg, &self.0.path().to_string_lossy())
+					.expect("Database backend error");
 				Arc::new(db)
 			},
-			DatabaseType::ParityDb => {
-				Arc::new(ParityDbWrapper({
-					let mut options = parity_db::Options::with_columns(self.0.path(), 1);
-					let mut column_options = &mut options.columns[0];
-					column_options.ref_counted = true;
-					column_options.preimage = true;
-					column_options.uniform = true;
-					parity_db::Db::open(&options).expect("db open error")
-				}))
-			}
+			DatabaseType::ParityDb => Arc::new(ParityDbWrapper({
+				let mut options = parity_db::Options::with_columns(self.0.path(), 1);
+				let mut column_options = &mut options.columns[0];
+				column_options.ref_counted = true;
+				column_options.preimage = true;
+				column_options.uniform = true;
+				parity_db::Db::open(&options).expect("db open error")
+			})),
 		}
-
 	}
 }
 
@@ -121,15 +120,10 @@ impl Clone for TempDatabase {
 		);
 		let self_db_files = std::fs::read_dir(self_dir)
 			.expect("failed to list file in seed dir")
-			.map(|f_result|
-				f_result.expect("failed to read file in seed db")
-					.path()
-			).collect::<Vec<PathBuf>>();
-		fs_extra::copy_items(
-			&self_db_files,
-			new_dir.path(),
-			&fs_extra::dir::CopyOptions::new(),
-		).expect("Copy of seed database is ok");
+			.map(|f_result| f_result.expect("failed to read file in seed db").path())
+			.collect::<Vec<PathBuf>>();
+		fs_extra::copy_items(&self_db_files, new_dir.path(), &fs_extra::dir::CopyOptions::new())
+			.expect("Copy of seed database is ok");
 		TempDatabase(new_dir)
 	}
 
diff --git a/substrate/bin/node/bench/src/trie.rs b/substrate/bin/node/bench/src/trie.rs
index a3e7620473d98ba46646c8e8bc1ac1333c0535b5..a17e386ca879b292dc8ee7228e2716f4417213d2 100644
--- a/substrate/bin/node/bench/src/trie.rs
+++ b/substrate/bin/node/bench/src/trie.rs
@@ -18,13 +18,13 @@
 
 //! Trie benchmark (integrated).
 
-use std::{borrow::Cow, collections::HashMap, sync::Arc};
+use hash_db::Prefix;
 use kvdb::KeyValueDB;
 use lazy_static::lazy_static;
 use rand::Rng;
-use hash_db::Prefix;
 use sp_state_machine::Backend as _;
 use sp_trie::{trie_types::TrieDBMut, TrieMut as _};
+use std::{borrow::Cow, collections::HashMap, sync::Arc};
 
 use node_primitives::Hash;
 
@@ -32,7 +32,7 @@ use crate::{
 	core::{self, Mode, Path},
 	generator::generate_trie,
 	simple_trie::SimpleTrie,
-	tempdb::{TempDatabase, DatabaseType},
+	tempdb::{DatabaseType, TempDatabase},
 };
 
 pub const SAMPLE_SIZE: usize = 100;
@@ -142,10 +142,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
 		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);
 		assert_eq!(query_keys.len(), SAMPLE_SIZE);
 
-		let root = generate_trie(
-			database.open(self.database_type),
-			key_values,
-		);
+		let root = generate_trie(database.open(self.database_type), key_values);
 
 		Box::new(TrieReadBenchmark {
 			database,
@@ -162,7 +159,8 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
 			self.database_size,
 			pretty_print(self.database_size.keys()),
 			self.database_type,
-		).into()
+		)
+		.into()
 	}
 }
 
@@ -182,12 +180,10 @@ impl core::Benchmark for TrieReadBenchmark {
 		let storage: Arc<dyn sp_state_machine::Storage<sp_core::Blake2Hasher>> =
 			Arc::new(Storage(db.open(self.database_type)));
 
-		let trie_backend = sp_state_machine::TrieBackend::new(
-			storage,
-			self.root,
-		);
+		let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root);
 		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
-			let value = trie_backend.storage(&warmup_key[..])
+			let value = trie_backend
+				.storage(&warmup_key[..])
 				.expect("Failed to get key: db error")
 				.expect("Warmup key should exist");
 
@@ -218,7 +214,6 @@ pub struct TrieWriteBenchmarkDescription {
 	pub database_type: DatabaseType,
 }
 
-
 impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
 	fn path(&self) -> Path {
 		let mut path = Path::new(&["trie", "write"]);
@@ -253,10 +248,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
 
 		assert_eq!(warmup_keys.len(), SAMPLE_SIZE);
 
-		let root = generate_trie(
-			database.open(self.database_type),
-			key_values,
-		);
+		let root = generate_trie(database.open(self.database_type), key_values);
 
 		Box::new(TrieWriteBenchmark {
 			database,
@@ -272,7 +264,8 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
 			self.database_size,
 			pretty_print(self.database_size.keys()),
 			self.database_type,
-		).into()
+		)
+		.into()
 	}
 }
 
@@ -292,15 +285,13 @@ impl core::Benchmark for TrieWriteBenchmark {
 		let mut new_root = self.root.clone();
 
 		let mut overlay = HashMap::new();
-		let mut trie = SimpleTrie {
-			db: kvdb.clone(),
-			overlay: &mut overlay,
-		};
-		let mut trie_db_mut = TrieDBMut::from_existing(&mut trie, &mut new_root)
-			.expect("Failed to create TrieDBMut");
+		let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay };
+		let mut trie_db_mut =
+			TrieDBMut::from_existing(&mut trie, &mut new_root).expect("Failed to create TrieDBMut");
 
 		for (warmup_key, warmup_value) in self.warmup_keys.iter() {
-			let value = trie_db_mut.get(&warmup_key[..])
+			let value = trie_db_mut
+				.get(&warmup_key[..])
 				.expect("Failed to get key: db error")
 				.expect("Warmup key should exist");
 
@@ -367,7 +358,9 @@ impl SizePool {
 
 	fn value<R: Rng>(&self, rng: &mut R) -> Vec<u8> {
 		let sr = (rng.next_u64() % self.total as u64) as u32;
-		let mut range = self.distribution.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded));
+		let mut range = self
+			.distribution
+			.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded));
 		let size = *range.next().unwrap().1 as usize;
 		random_vec(rng, size)
 	}
diff --git a/substrate/bin/node/bench/src/txpool.rs b/substrate/bin/node/bench/src/txpool.rs
index ef1c816109c8d1370fe690391e75531c710dce58..b0db734534855413b25191e73127038bfff049df 100644
--- a/substrate/bin/node/bench/src/txpool.rs
+++ b/substrate/bin/node/bench/src/txpool.rs
@@ -23,13 +23,13 @@
 
 use std::borrow::Cow;
 
-use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes, DatabaseType};
+use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile};
 use sc_transaction_pool::BasicPool;
-use sp_runtime::generic::BlockId;
 use sc_transaction_pool_api::{TransactionPool, TransactionSource};
+use sp_runtime::generic::BlockId;
 
-use crate::core::{self, Path, Mode};
+use crate::core::{self, Mode, Path};
 
 pub struct PoolBenchmarkDescription {
 	pub database_type: DatabaseType,
@@ -46,11 +46,7 @@ impl core::BenchmarkDescription for PoolBenchmarkDescription {
 
 	fn setup(self: Box<Self>) -> Box<dyn core::Benchmark> {
 		Box::new(PoolBenchmark {
-			database: BenchDb::with_key_types(
-				self.database_type,
-				50_000,
-				KeyTypes::Sr25519,
-			),
+			database: BenchDb::with_key_types(self.database_type, 50_000, KeyTypes::Sr25519),
 		})
 	}
 
@@ -63,7 +59,9 @@ impl core::Benchmark for PoolBenchmark {
 	fn run(&mut self, mode: Mode) -> std::time::Duration {
 		let context = self.database.create_context(Profile::Wasm);
 
-		let _ = context.client.runtime_version_at(&BlockId::Number(0))
+		let _ = context
+			.client
+			.runtime_version_at(&BlockId::Number(0))
 			.expect("Failed to get runtime version")
 			.spec_version;
 
@@ -80,22 +78,20 @@ impl core::Benchmark for PoolBenchmark {
 			context.client.clone(),
 		);
 
-		let generated_transactions = self.database.block_content(
-			BlockType::RandomTransfersKeepAlive.to_content(Some(100)),
-			&context.client,
-		).into_iter().collect::<Vec<_>>();
+		let generated_transactions = self
+			.database
+			.block_content(
+				BlockType::RandomTransfersKeepAlive.to_content(Some(100)),
+				&context.client,
+			)
+			.into_iter()
+			.collect::<Vec<_>>();
 
 		let start = std::time::Instant::now();
-		let submissions = generated_transactions.into_iter().map(|tx| {
-			txpool.submit_one(
-				&BlockId::Number(0),
-				TransactionSource::External,
-				tx,
-			)
-		});
-		futures::executor::block_on(
-			futures::future::join_all(submissions)
-		);
+		let submissions = generated_transactions
+			.into_iter()
+			.map(|tx| txpool.submit_one(&BlockId::Number(0), TransactionSource::External, tx));
+		futures::executor::block_on(futures::future::join_all(submissions));
 		let elapsed = start.elapsed();
 
 		if mode == Mode::Profile {
diff --git a/substrate/bin/node/browser-testing/src/lib.rs b/substrate/bin/node/browser-testing/src/lib.rs
index a269e9cab21e42d22d3b29093afb03ebac53b687..35804bef2168e9407db2205243b8fb087753e5c6 100644
--- a/substrate/bin/node/browser-testing/src/lib.rs
+++ b/substrate/bin/node/browser-testing/src/lib.rs
@@ -28,11 +28,11 @@
 //! flag and open a browser to the url that `wasm-pack test` outputs.
 //! For more information see <https://rustwasm.github.io/docs/wasm-pack/>.
-use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
-use wasm_bindgen_futures::JsFuture;
-use wasm_bindgen::JsValue;
-use jsonrpc_core::types::{MethodCall, Success, Version, Params, Id};
+use jsonrpc_core::types::{Id, MethodCall, Params, Success, Version};
 use serde::de::DeserializeOwned;
+use wasm_bindgen::JsValue;
+use wasm_bindgen_futures::JsFuture;
+use wasm_bindgen_test::{wasm_bindgen_test, wasm_bindgen_test_configure};
 
 wasm_bindgen_test_configure!(run_in_browser);
 
@@ -41,8 +41,9 @@ fn rpc_call(method: &str) -> String {
 		jsonrpc: Some(Version::V2),
 		method: method.into(),
 		params: Params::None,
-		id: Id::Num(1)
-	}).unwrap()
+		id: Id::Num(1),
+	})
+	.unwrap()
 }
 
 fn deserialize_rpc_result<T: DeserializeOwned>(js_value: JsValue) -> T {
@@ -55,15 +56,12 @@ fn deserialize_rpc_result<T: DeserializeOwned>(js_value: JsValue) -> T {
 
 #[wasm_bindgen_test]
 async fn runs() {
-	let mut client = node_cli::start_client(None, "info".into())
-		.unwrap();
+	let mut client = node_cli::start_client(None, "info".into()).unwrap();
 
 	// Check that the node handles rpc calls.
 	// TODO: Re-add the code that checks if the node is syncing.
 	let chain_name: String = deserialize_rpc_result(
-		JsFuture::from(client.rpc_send(&rpc_call("system_chain")))
-			.await
-			.unwrap()
+		JsFuture::from(client.rpc_send(&rpc_call("system_chain"))).await.unwrap(),
 	);
 	assert_eq!(chain_name, "Development");
 }
diff --git a/substrate/bin/node/cli/build.rs b/substrate/bin/node/cli/build.rs
index befcdaea6d9cff98ce2df3ad334d1430efe718e8..90aec2222c9ece9be834f37b094ac0436622eff2 100644
--- a/substrate/bin/node/cli/build.rs
+++ b/substrate/bin/node/cli/build.rs
@@ -25,8 +25,8 @@ fn main() {
 mod cli {
 	include!("src/cli.rs");
 
-	use std::{fs, env, path::Path};
 	use sc_cli::structopt::clap::Shell;
+	use std::{env, fs, path::Path};
 	use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};
 
 	pub fn main() {
@@ -51,9 +51,12 @@ mod cli {
 			Some(dir) => dir,
 		};
 		let path = Path::new(&outdir)
-			.parent().unwrap()
-			.parent().unwrap()
-			.parent().unwrap()
+			.parent()
+			.unwrap()
+			.parent()
+			.unwrap()
+			.parent()
+			.unwrap()
 			.join("completion-scripts");
 
 		fs::create_dir(&path).ok();
diff --git a/substrate/bin/node/cli/src/browser.rs b/substrate/bin/node/cli/src/browser.rs
index 82f1921d2a6b5505a3446e533848499f09b6145a..dee93180e70d50586f4bec5fcb5c194d20f3ecbb 100644
--- a/substrate/bin/node/cli/src/browser.rs
+++ b/substrate/bin/node/cli/src/browser.rs
@@ -17,18 +17,14 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use crate::chain_spec::ChainSpec;
+use browser_utils::{browser_configuration, init_logging, set_console_error_panic_hook, Client};
 use log::info;
 use wasm_bindgen::prelude::*;
-use browser_utils::{
-	Client,
-	browser_configuration, init_logging, set_console_error_panic_hook,
-};
 
 /// Starts the client.
 #[wasm_bindgen]
 pub fn start_client(chain_spec: Option<String>, log_level: String) -> Result<Client, JsValue> {
-	start_inner(chain_spec, log_level)
-		.map_err(|err| JsValue::from_str(&err.to_string()))
+	start_inner(chain_spec, log_level).map_err(|err| JsValue::from_str(&err.to_string()))
 }
 
 fn start_inner(
@@ -53,10 +49,9 @@ fn start_inner(
 	info!("👤 Role: {:?}", config.role);
 
 	// Create the service. This is the most heavy initialization step.
-	let (task_manager, rpc_handlers) =
-		crate::service::new_light_base(config)
-			.map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers))
-			.map_err(|e| format!("{:?}", e))?;
+	let (task_manager, rpc_handlers) = crate::service::new_light_base(config)
+		.map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers))
+		.map_err(|e| format!("{:?}", e))?;
 
 	Ok(browser_utils::start_client(task_manager, rpc_handlers))
 }
diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs
index e3ba16b9de6f3837a645a1055e67df3111f54820..2891736e5c225f763c5f4dd487f1918bc5b01c13 100644
--- a/substrate/bin/node/cli/src/chain_spec.rs
+++ b/substrate/bin/node/cli/src/chain_spec.rs
@@ -18,25 +18,26 @@
 
 //! Substrate chain configurations.
 
-use sc_chain_spec::ChainSpecExtension;
-use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519};
-use serde::{Serialize, Deserialize};
+use grandpa_primitives::AuthorityId as GrandpaId;
+use hex_literal::hex;
 use node_runtime::{
-	AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, CouncilConfig,
-	DemocracyConfig, GrandpaConfig, ImOnlineConfig, SessionConfig, SessionKeys, StakerStatus,
-	StakingConfig, ElectionsConfig, IndicesConfig, SocietyConfig, SudoConfig, SystemConfig,
-	TechnicalCommitteeConfig, wasm_binary_unwrap, MAX_NOMINATIONS,
+	constants::currency::*, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig,
+	BalancesConfig, Block, CouncilConfig, DemocracyConfig, ElectionsConfig, GrandpaConfig,
+	ImOnlineConfig, IndicesConfig, SessionConfig, SessionKeys, SocietyConfig, StakerStatus,
+	StakingConfig, SudoConfig, SystemConfig, TechnicalCommitteeConfig, MAX_NOMINATIONS,
 };
-use node_runtime::Block;
-use node_runtime::constants::currency::*;
+use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
+use sc_chain_spec::ChainSpecExtension;
 use sc_service::ChainType;
-use hex_literal::hex;
 use sc_telemetry::TelemetryEndpoints;
-use grandpa_primitives::{AuthorityId as GrandpaId};
-use sp_consensus_babe::{AuthorityId as BabeId};
-use pallet_im_online::sr25519::{AuthorityId as ImOnlineId};
+use serde::{Deserialize, Serialize};
 use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
-use sp_runtime::{Perbill, traits::{Verify, IdentifyAccount}};
+use sp_consensus_babe::AuthorityId as BabeId;
+use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
+use sp_runtime::{
+	traits::{IdentifyAccount, Verify},
+	Perbill,
+};
 
 pub use node_primitives::{AccountId, Balance, Signature};
 pub use node_runtime::GenesisConfig;
@@ -59,10 +60,7 @@ pub struct Extensions {
 }
 
 /// Specialized `ChainSpec`.
-pub type ChainSpec = sc_service::GenericChainSpec< - GenesisConfig, - Extensions, ->; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) @@ -84,65 +82,94 @@ fn staging_testnet_config_genesis() -> GenesisConfig { // and // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done - let initial_authorities: Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId)> = vec![( - // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy - hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), - // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq - hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), - // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC - hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - ),( - // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 - hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), - // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF - hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), - // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE - hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - ),( - // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp - hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), - // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 - hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), - // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d - hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - ),( - // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 - hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), - // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn - hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), - // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 - 
hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - )]; + let initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + )> = vec![ + ( + // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy + hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), + // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq + hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), + // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC + hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + ), + ( + // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 + hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), + // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF + hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), + // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE + hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + ), + ( + // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp + hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), + // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 + hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), + // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d + hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + ), + ( + // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 + hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), + // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn + 
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), + // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 + hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + ), + ]; // generated with secret: subkey inspect "$secret"/fir let root_key: AccountId = hex![ // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" - ].into(); + ] + .into(); let endowed_accounts: Vec = vec![root_key.clone()]; @@ -158,8 +185,10 @@ pub fn staging_testnet_config() -> ChainSpec { ChainType::Live, staging_testnet_config_genesis, boot_nodes, - Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Staging telemetry url is valid; qed")), + Some( + TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Staging telemetry url is valid; qed"), + ), None, None, Default::default(), @@ -174,21 +203,17 @@ pub fn get_from_seed(seed: &str) -> ::Pu } /// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, { AccountPublic::from(get_from_seed::(seed)).into_account() } /// Helper function to generate stash, controller and session key from seed -pub fn authority_keys_from_seed(seed: &str) -> ( - AccountId, - AccountId, - GrandpaId, - BabeId, - ImOnlineId, - AuthorityDiscoveryId, -) { +pub fn authority_keys_from_seed( + seed: &str, +) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId) { ( get_account_id_from_seed::(&format!("{}//stash", seed)), get_account_id_from_seed::(seed), @@ -230,11 +255,15 @@ pub fn testnet_genesis( ] }); // endow all authorities and nominators. - initial_authorities.iter().map(|x| &x.0).chain(initial_nominators.iter()).for_each(|x| { - if !endowed_accounts.contains(&x) { - endowed_accounts.push(x.clone()) - } - }); + initial_authorities + .iter() + .map(|x| &x.0) + .chain(initial_nominators.iter()) + .for_each(|x| { + if !endowed_accounts.contains(&x) { + endowed_accounts.push(x.clone()) + } + }); // stakers: all validators and nominators. 
let mut rng = rand::thread_rng(); @@ -266,22 +295,20 @@ pub fn testnet_genesis( changes_trie_config: Default::default(), }, balances: BalancesConfig { - balances: endowed_accounts.iter().cloned() - .map(|x| (x, ENDOWMENT)) - .collect() - }, - indices: IndicesConfig { - indices: vec![], + balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), }, + indices: IndicesConfig { indices: vec![] }, session: SessionConfig { - keys: initial_authorities.iter().map(|x| { - (x.0.clone(), x.0.clone(), session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - )) - }).collect::>(), + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), + ) + }) + .collect::>(), }, staking: StakingConfig { validator_count: initial_authorities.len() as u32, @@ -289,47 +316,42 @@ pub fn testnet_genesis( invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), stakers, - .. Default::default() + ..Default::default() }, democracy: DemocracyConfig::default(), elections: ElectionsConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .map(|member| (member, STASH)) - .collect(), + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .map(|member| (member, STASH)) + .collect(), }, council: CouncilConfig::default(), technical_committee: TechnicalCommitteeConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), phantom: Default::default(), }, - sudo: SudoConfig { - key: root_key, - }, + sudo: SudoConfig { key: root_key }, babe: BabeConfig { authorities: vec![], epoch_config: Some(node_runtime::BABE_GENESIS_EPOCH_CONFIG), }, - im_online: ImOnlineConfig { - keys: vec![], - }, - authority_discovery: AuthorityDiscoveryConfig { - keys: vec![], - }, - grandpa: GrandpaConfig { - authorities: vec![], - }, + im_online: ImOnlineConfig { keys: vec![] }, + authority_discovery: AuthorityDiscoveryConfig { keys: vec![] }, + grandpa: GrandpaConfig { authorities: vec![] }, technical_membership: Default::default(), treasury: Default::default(), society: SocietyConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), pot: 0, max_members: 999, }, @@ -341,9 +363,7 @@ pub fn testnet_genesis( fn development_config_genesis() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], + vec![authority_keys_from_seed("Alice")], vec![], get_account_id_from_seed::("Alice"), None, @@ -367,10 +387,7 @@ pub fn development_config() -> ChainSpec { fn local_testnet_genesis() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - authority_keys_from_seed("Bob"), - ], + vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], vec![], get_account_id_from_seed::("Alice"), None, @@ -401,9 +418,7 @@ pub(crate) mod tests { fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], + vec![authority_keys_from_seed("Alice")], vec![], get_account_id_from_seed::("Alice"), None, @@ -446,14 +461,24 @@ pub(crate) mod tests { sc_service_test::connectivity( 
integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } - = new_full_base(config,|_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. } = + new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new( + task_manager, + client, + network, + transaction_pool, + )) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) - } + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) + }, ); } diff --git a/substrate/bin/node/cli/src/cli.rs b/substrate/bin/node/cli/src/cli.rs index 11ea58f4068df237287da25f8da25082617922c5..850581748fde3e229f5eb148ef7b016e5ea42f33 100644 --- a/substrate/bin/node/cli/src/cli.rs +++ b/substrate/bin/node/cli/src/cli.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_cli::{RunCmd, KeySubcommand, SignCmd, VanityCmd, VerifyCmd}; +use sc_cli::{KeySubcommand, RunCmd, SignCmd, VanityCmd, VerifyCmd}; use structopt::StructOpt; /// An overarching CLI command definition. diff --git a/substrate/bin/node/cli/src/command.rs b/substrate/bin/node/cli/src/command.rs index 1ef1da6ba6819593fcfe57b3a39a27856e7ca24a..b904ea99e8f9f404796eca247b6136f62821ea8b 100644 --- a/substrate/bin/node/cli/src/command.rs +++ b/substrate/bin/node/cli/src/command.rs @@ -16,12 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{chain_spec, service, Cli, Subcommand}; +use crate::{chain_spec, service, service::new_partial, Cli, Subcommand}; use node_executor::Executor; use node_runtime::{Block, RuntimeApi}; -use sc_cli::{Result, SubstrateCli, RuntimeVersion, Role, ChainSpec}; +use sc_cli::{ChainSpec, Result, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; -use crate::service::new_partial; impl SubstrateCli for Cli { fn impl_name() -> String { @@ -49,17 +48,19 @@ impl SubstrateCli for Cli { } fn load_spec(&self, id: &str) -> std::result::Result, String> { - let spec = - match id { - "" => return Err("Please specify which chain you want to run, e.g. --dev or --chain=local".into()), - "dev" => Box::new(chain_spec::development_config()), - "local" => Box::new(chain_spec::local_testnet_config()), - "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), - "staging" => Box::new(chain_spec::staging_testnet_config()), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), - }; + let spec = match id { + "" => + return Err( + "Please specify which chain you want to run, e.g. 
--dev or --chain=local" + .into(), + ), + "dev" => Box::new(chain_spec::development_config()), + "local" => Box::new(chain_spec::local_testnet_config()), + "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), + "staging" => Box::new(chain_spec::staging_testnet_config()), + path => + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }; Ok(spec) } @@ -79,24 +80,25 @@ pub fn run() -> Result<()> { match config.role { Role::Light => service::new_light(config), _ => service::new_full(config), - }.map_err(sc_cli::Error::Service) + } + .map_err(sc_cli::Error::Service) }) - } + }, Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(config)) - } - Some(Subcommand::Benchmark(cmd)) => { + }, + Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run::(config)) } else { Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`.".into()) - } - } + You can enable it with `--features runtime-benchmarks`." + .into()) + }, Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::Sign(cmd)) => cmd.run(), Some(Subcommand::Verify(cmd)) => cmd.run(), @@ -108,32 +110,30 @@ pub fn run() -> Result<()> { Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) }, @@ -144,8 +144,7 @@ pub fn run() -> Result<()> { Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { client, task_manager, backend, ..} - = new_partial(&config)?; + let PartialComponents { client, task_manager, backend, .. } = new_partial(&config)?; Ok((cmd.run(client, backend), task_manager)) }) }, @@ -156,18 +155,16 @@ pub fn run() -> Result<()> { // we don't need any of the components of new_partial, just a runtime, or a task // manager to do `async_run`. 
let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
-				let task_manager = sc_service::TaskManager::new(
-					config.task_executor.clone(),
-					registry,
-				).map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?;
+				let task_manager =
+					sc_service::TaskManager::new(config.task_executor.clone(), registry)
+						.map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?;
 
 				Ok((cmd.run::<Block, Executor>(config), task_manager))
 			})
 		},
 		#[cfg(not(feature = "try-runtime"))]
-		Some(Subcommand::TryRuntime) => {
-			Err("TryRuntime wasn't enabled when building the node. \
-				You can enable it with `--features try-runtime`.".into())
-		},
+		Some(Subcommand::TryRuntime) => Err("TryRuntime wasn't enabled when building the node. \
+			You can enable it with `--features try-runtime`."
+			.into()),
 	}
 }
diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs
index 4886b798b050cdb0aa7b21f07753c7c2c25bb979..47bc5f5b021f5a308d56ffae3b60fc7a18359351 100644
--- a/substrate/bin/node/cli/src/service.rs
+++ b/substrate/bin/node/cli/src/service.rs
@@ -20,20 +20,17 @@
 
 //! Service implementation. Specialized wrapper over substrate service.
 
-use std::sync::Arc;
-use sc_consensus_babe;
+use futures::prelude::*;
+use node_executor::Executor;
 use node_primitives::Block;
 use node_runtime::RuntimeApi;
-use sc_service::{
-	config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager,
-};
-use sc_network::{Event, NetworkService};
-use sp_runtime::traits::Block as BlockT;
-use futures::prelude::*;
 use sc_client_api::{ExecutorProvider, RemoteBackend};
-use node_executor::Executor;
+use sc_consensus_babe::{self, SlotProportion};
+use sc_network::{Event, NetworkService};
+use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
-use sc_consensus_babe::SlotProportion;
+use sp_runtime::traits::Block as BlockT;
+use std::sync::Arc;
 
 type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
 type FullBackend = sc_service::TFullBackend<Block>;
@@ -44,25 +41,29 @@ type LightClient = sc_service::TLightClient<Block, RuntimeApi, Executor>;
 
 pub fn new_partial(
 	config: &Configuration,
-) -> Result<sc_service::PartialComponents<
-	FullClient, FullBackend, FullSelectChain,
-	sp_consensus::DefaultImportQueue<Block, FullClient>,
-	sc_transaction_pool::FullPool<Block, FullClient>,
-	(
-		impl Fn(
-			node_rpc::DenyUnsafe,
-			sc_rpc::SubscriptionTaskExecutor,
-		) -> node_rpc::IoHandler,
+) -> Result<
+	sc_service::PartialComponents<
+		FullClient,
+		FullBackend,
+		FullSelectChain,
+		sp_consensus::DefaultImportQueue<Block, FullClient>,
+		sc_transaction_pool::FullPool<Block, FullClient>,
 		(
-			sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>,
-			grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
-			sc_consensus_babe::BabeLink<Block>,
+			impl Fn(node_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> node_rpc::IoHandler,
+			(
+				sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>,
+				grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+				sc_consensus_babe::BabeLink<Block>,
+			),
+			grandpa::SharedVoterState,
+			Option<Telemetry>,
 		),
-		grandpa::SharedVoterState,
-		Option<Telemetry>,
-	)
->, ServiceError> {
+	>,
+	ServiceError,
+> {
-	let telemetry = config.telemetry_endpoints.clone()
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			let worker = TelemetryWorker::new(16)?;
@@ -78,11 +79,10 @@ pub fn new_partial(
 	)?;
 	let client = Arc::new(client);
 
-	let telemetry = telemetry
-		.map(|(worker, telemetry)| {
-			task_manager.spawn_handle().spawn("telemetry", worker.run());
-			telemetry
-		});
+	let telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
 
 	let select_chain = sc_consensus::LongestChain::new(backend.clone());
@@ -115,21 +115,19 @@ pub fn new_partial( Some(Box::new(justification_import)), client.clone(), select_chain.clone(), - move |_, ()| { - async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); - let uncles = - sp_authorship::InherentDataProvider::<::Header>::check_inherents(); + let uncles = + sp_authorship::InherentDataProvider::<::Header>::check_inherents(); - Ok((timestamp, slot, uncles)) - } + Ok((timestamp, slot, uncles)) }, &task_manager.spawn_essential_handle(), config.prometheus_registry(), @@ -213,7 +211,7 @@ pub fn new_full_base( with_startup_data: impl FnOnce( &sc_consensus_babe::BabeBlockImport, &sc_consensus_babe::BabeLink, - ) + ), ) -> Result { let sc_service::PartialComponents { client, @@ -238,7 +236,7 @@ pub fn new_full_base( task_manager.spawn_handle(), backend.clone(), import_setup.1.shared_authority_set().clone(), - ) + ), ); let (network, system_rpc_tx, network_starter) = @@ -254,7 +252,10 @@ pub fn new_full_base( if config.offchain_worker.enabled { sc_service::build_offchain_workers( - &config, task_manager.spawn_handle(), client.clone(), network.clone(), + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), ); } @@ -266,22 +267,20 @@ pub fn new_full_base( let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let _rpc_handlers = sc_service::spawn_tasks( - sc_service::SpawnTasksParams { - config, - backend: backend.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - network: network.clone(), - rpc_extensions_builder: Box::new(rpc_extensions_builder), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - on_demand: None, - remote_blockchain: None, - system_rpc_tx, - telemetry: telemetry.as_mut(), - }, - )?; + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + config, + backend: backend.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + on_demand: None, + remote_blockchain: None, + system_rpc_tx, + telemetry: telemetry.as_mut(), + })?; let (block_import, grandpa_link, babe_link) = import_setup; @@ -343,36 +342,37 @@ pub fn new_full_base( // Spawn authority discovery module. 
if role.is_authority() { - let authority_discovery_role = sc_authority_discovery::Role::PublishAndDiscover( - keystore_container.keystore(), - ); - let dht_event_stream = network.event_stream("authority-discovery") - .filter_map(|e| async move { match e { - Event::Dht(e) => Some(e), - _ => None, - }}); - let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service_with_config( - sc_authority_discovery::WorkerConfig { - publish_non_global_ips: auth_disc_publish_non_global_ips, - ..Default::default() - }, - client.clone(), - network.clone(), - Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); + let authority_discovery_role = + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()); + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (authority_discovery_worker, _service) = + sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); - task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker.run()); + task_manager + .spawn_handle() + .spawn("authority-discovery-worker", authority_discovery_worker.run()); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None - }; + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; let config = grandpa::Config { // FIXME #1578 make this available through chainspec @@ -404,46 +404,41 @@ pub fn new_full_base( // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); + task_manager + .spawn_essential_handle() + .spawn_blocking("grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)?); } network_starter.start_network(); - Ok(NewFullBase { - task_manager, - client, - network, - transaction_pool, - }) + Ok(NewFullBase { task_manager, client, network, transaction_pool }) } /// Builds a new service for a full client. -pub fn new_full( - config: Configuration, -) -> Result { - new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. }| { - task_manager - }) +pub fn new_full(config: Configuration) -> Result { + new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. 
}| task_manager)
 }
 
 pub fn new_light_base(
 	mut config: Configuration,
-) -> Result<(
-	TaskManager,
-	RpcHandlers,
-	Arc<LightClient>,
-	Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
-	Arc<sc_transaction_pool::LightPool<Block, LightClient, sc_network::config::OnDemand<Block>>>
-), ServiceError> {
-	let telemetry = config.telemetry_endpoints.clone()
+) -> Result<
+	(
+		TaskManager,
+		RpcHandlers,
+		Arc<LightClient>,
+		Arc<NetworkService<Block, <Block as BlockT>::Hash>>,
+		Arc<
+			sc_transaction_pool::LightPool<Block, LightClient, sc_network::config::OnDemand<Block>>,
+		>,
+	),
+	ServiceError,
+> {
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
 		.filter(|x| !x.is_empty())
 		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
 			#[cfg(feature = "browser")]
-			let transport = Some(
-				sc_telemetry::ExtTransport::new(libp2p_wasm_ext::ffi::websocket_transport())
-			);
+			let transport = Some(sc_telemetry::ExtTransport::new(libp2p_wasm_ext::ffi::websocket_transport()));
 			#[cfg(not(feature = "browser"))]
 			let transport = None;
 
@@ -459,11 +454,10 @@ pub fn new_light_base(
 	)?;
 
-	let mut telemetry = telemetry
-		.map(|(worker, telemetry)| {
-			task_manager.spawn_handle().spawn("telemetry", worker.run());
-			telemetry
-		});
+	let mut telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
 
 	config.network.extra_sets.push(grandpa::grandpa_peers_set_config());
 
@@ -567,71 +561,60 @@ pub fn new_light_base(
 	let rpc_extensions = node_rpc::create_light(light_deps);
 
-	let rpc_handlers =
-		sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-			on_demand: Some(on_demand),
-			remote_blockchain: Some(backend.remote_blockchain()),
-			rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)),
-			client: client.clone(),
-			transaction_pool: transaction_pool.clone(),
-			keystore: keystore_container.sync_keystore(),
-			config, backend, system_rpc_tx,
-			network: network.clone(),
-			task_manager: &mut task_manager,
-			telemetry: telemetry.as_mut(),
-		})?;
+	let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		on_demand: Some(on_demand),
+		remote_blockchain: Some(backend.remote_blockchain()),
+		rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)),
+		client: client.clone(),
+		transaction_pool: transaction_pool.clone(),
+		keystore: keystore_container.sync_keystore(),
+		config,
+		backend,
+		system_rpc_tx,
+		network: network.clone(),
+		task_manager: &mut task_manager,
+		telemetry: telemetry.as_mut(),
+	})?;
 
 	network_starter.start_network();
-	Ok((
-		task_manager,
-		rpc_handlers,
-		client,
-		network,
-		transaction_pool,
-	))
+	Ok((task_manager, rpc_handlers, client, network, transaction_pool))
 }
 
 /// Builds a new service for a light client.
-pub fn new_light(
-	config: Configuration,
-) -> Result<TaskManager, ServiceError> {
-	new_light_base(config).map(|(task_manager, _, _, _, _)| {
-		task_manager
-	})
+pub fn new_light(config: Configuration) -> Result<TaskManager, ServiceError> {
+	new_light_base(config).map(|(task_manager, _, _, _, _)| task_manager)
 }
 
 #[cfg(test)]
 mod tests {
-	use std::{sync::Arc, borrow::Cow, convert::TryInto};
-	use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY};
+	use crate::service::{new_full_base, new_light_base, NewFullBase};
+	use codec::Encode;
+	use node_primitives::{Block, DigestItem, Signature};
+	use node_runtime::{
+		constants::{currency::CENTS, time::SLOT_DURATION},
+		Address, BalancesCall, Call, UncheckedExtrinsic,
+	};
+	use sc_client_api::BlockBackend;
+	use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY};
 	use sc_consensus_epochs::descendent_query;
+	use sc_keystore::LocalKeystore;
+	use sc_service_test::TestNetNode;
+	use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool};
 	use sp_consensus::{
-		Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport,
-	};
-	use node_primitives::{Block, DigestItem, Signature};
-	use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address};
-	use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION};
-	use codec::Encode;
-	use sp_core::{
-		crypto::Pair as CryptoPair,
-		H256,
-		Public
+		BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer,
 	};
-	use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore};
+	use sp_core::{crypto::Pair as CryptoPair, Public, H256};
+	use sp_inherents::InherentDataProvider;
+	use sp_keyring::AccountKeyring;
+	use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
 	use sp_runtime::{
-		generic::{BlockId, Era, Digest, SignedPayload},
-		traits::{Block as BlockT, Header as HeaderT},
-		traits::Verify,
+		generic::{BlockId, Digest, Era, SignedPayload},
+		key_types::BABE,
+		traits::{Block as BlockT, Header as HeaderT, IdentifyAccount, Verify},
+		RuntimeAppPublic,
 	};
 	use sp_timestamp;
-	use sp_keyring::AccountKeyring;
-	use sc_service_test::TestNetNode;
-	use crate::service::{new_full_base, new_light_base, NewFullBase};
-	use sp_runtime::{key_types::BABE, traits::IdentifyAccount, RuntimeAppPublic};
-	use sc_transaction_pool_api::{MaintainedTransactionPool, ChainEvent};
-	use sc_client_api::BlockBackend;
-	use sc_keystore::LocalKeystore;
-	use sp_inherents::InherentDataProvider;
+	use std::{borrow::Cow, convert::TryInto, sync::Arc};
 
 	type AccountPublic = <Signature as Verify>::Signer;
 
@@ -641,10 +624,12 @@ mod tests {
 	#[ignore]
 	fn test_sync() {
 		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
-		let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None)
-			.expect("Creates keystore"));
-		let alice: sp_consensus_babe::AuthorityId = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice"))
-			.expect("Creates authority pair").into();
+		let keystore: SyncCryptoStorePtr =
+			Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore"));
+		let alice: sp_consensus_babe::AuthorityId =
+			SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice"))
+				.expect("Creates authority pair")
+				.into();
 
 		let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority();
 
@@ -660,25 +645,31 @@ mod tests {
 			chain_spec,
 			|config| {
 				let mut setup_handles = None;
-				let NewFullBase {
-					task_manager, client, network, transaction_pool, ..
- } = new_full_base(config, - | - block_import: &sc_consensus_babe::BabeBlockImport, - babe_link: &sc_consensus_babe::BabeLink, - | { - setup_handles = Some((block_import.clone(), babe_link.clone())); - } - )?; + let NewFullBase { task_manager, client, network, transaction_pool, .. } = + new_full_base( + config, + |block_import: &sc_consensus_babe::BabeBlockImport, + babe_link: &sc_consensus_babe::BabeLink| { + setup_handles = Some((block_import.clone(), babe_link.clone())); + }, + )?; let node = sc_service_test::TestNetComponents::new( - task_manager, client, network, transaction_pool + task_manager, + client, + network, + transaction_pool, ); Ok((node, setup_handles.unwrap())) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) }, |service, &mut (ref mut block_import, ref babe_link)| { let parent_id = BlockId::number(service.client().chain_info().best_number); @@ -686,14 +677,9 @@ mod tests { let parent_hash = parent_header.hash(); let parent_number = *parent_header.number(); - futures::executor::block_on( - service.transaction_pool().maintain( - ChainEvent::NewBestBlock { - hash: parent_header.hash(), - tree_route: None, - }, - ) - ); + futures::executor::block_on(service.transaction_pool().maintain( + ChainEvent::NewBestBlock { hash: parent_header.hash(), tree_route: None }, + )); let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( service.spawn_handle(), @@ -708,23 +694,30 @@ mod tests { // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. 
let (babe_pre_digest, epoch_descriptor) = loop { - let epoch_descriptor = babe_link.epoch_changes().shared_data().epoch_descriptor_for_child_of( - descendent_query(&*service.client()), - &parent_hash, - parent_number, - slot.into(), - ).unwrap().unwrap(); - - let epoch = babe_link.epoch_changes().shared_data().epoch_data( - &epoch_descriptor, - |slot| sc_consensus_babe::Epoch::genesis(&babe_link.config(), slot), - ).unwrap(); - - if let Some(babe_pre_digest) = sc_consensus_babe::authorship::claim_slot( - slot.into(), - &epoch, - &keystore, - ).map(|(digest, _)| digest) { + let epoch_descriptor = babe_link + .epoch_changes() + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot.into(), + ) + .unwrap() + .unwrap(); + + let epoch = babe_link + .epoch_changes() + .shared_data() + .epoch_data(&epoch_descriptor, |slot| { + sc_consensus_babe::Epoch::genesis(&babe_link.config(), slot) + }) + .unwrap(); + + if let Some(babe_pre_digest) = + sc_consensus_babe::authorship::claim_slot(slot.into(), &epoch, &keystore) + .map(|(digest, _)| digest) + { break (babe_pre_digest, epoch_descriptor) } @@ -736,19 +729,21 @@ mod tests { std::time::Duration::from_millis(SLOT_DURATION * slot).into(), ), sp_consensus_babe::inherents::InherentDataProvider::new(slot.into()), - ).create_inherent_data().expect("Creates inherent data"); + ) + .create_inherent_data() + .expect("Creates inherent data"); digest.push(::babe_pre_digest(babe_pre_digest)); let new_block = futures::executor::block_on(async move { let proposer = proposer_factory.init(&parent_header).await; - proposer.unwrap().propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - None, - ).await - }).expect("Error making test block").block; + proposer + .unwrap() + .propose(inherent_data, digest, std::time::Duration::from_secs(1), None) + .await + }) + .expect("Error making test block") + .block; let (new_header, new_body) = new_block.deconstruct(); let pre_hash = new_header.hash(); @@ -760,10 +755,12 @@ mod tests { sp_consensus_babe::AuthorityId::ID, &alice.to_public_crypto_pair(), &to_sign, - ).unwrap().unwrap().try_into().unwrap(); - let item = ::babe_seal( - signature, - ); + ) + .unwrap() + .unwrap() + .try_into() + .unwrap(); + let item = ::babe_seal(signature); slot += 1; let mut params = BlockImportParams::new(BlockOrigin::File, new_header); @@ -811,19 +808,13 @@ mod tests { let raw_payload = SignedPayload::from_raw( function, extra, - (spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()) + (spec_version, transaction_version, genesis_hash, genesis_hash, (), (), ()), ); - let signature = raw_payload.using_encoded(|payload| { - signer.sign(payload) - }); + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); let (function, extra, _) = raw_payload.deconstruct(); index += 1; - UncheckedExtrinsic::new_signed( - function, - from.into(), - signature.into(), - extra, - ).into() + UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), extra) + .into() }, ); } @@ -834,18 +825,25 @@ mod tests { sc_service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| { - let NewFullBase { task_manager, client, network, transaction_pool, .. } - = new_full_base(config,|_, _| ())?; - Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) + let NewFullBase { task_manager, client, network, transaction_pool, .. 
} = + new_full_base(config, |_, _| ())?; + Ok(sc_service_test::TestNetComponents::new( + task_manager, + client, + network, + transaction_pool, + )) }, |config| { let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) + Ok(sc_service_test::TestNetComponents::new( + keep_alive, + client, + network, + transaction_pool, + )) }, - vec![ - "//Alice".into(), - "//Bob".into(), - ], + vec!["//Alice".into(), "//Bob".into()], ) } } diff --git a/substrate/bin/node/cli/tests/common.rs b/substrate/bin/node/cli/tests/common.rs index c3bb96555da56a64f3c16c988ff3a3b1b1dadea0..50776202d79eb56018476feb3928002b812463c4 100644 --- a/substrate/bin/node/cli/tests/common.rs +++ b/substrate/bin/node/cli/tests/common.rs @@ -18,11 +18,18 @@ #![cfg(unix)] -use std::{process::{Child, ExitStatus}, thread, time::Duration, path::Path}; use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command}; -use nix::sys::signal::{kill, Signal::SIGINT}; -use nix::unistd::Pid; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; +use std::{ + convert::TryInto, + path::Path, + process::{Child, Command, ExitStatus}, + thread, + time::Duration, +}; /// Wait for the given `child` the given number of `secs`. /// @@ -50,12 +57,7 @@ pub fn wait_for(child: &mut Child, secs: usize) -> Option { pub fn run_dev_node_for_a_while(base_path: &Path) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd - .args(&["--dev"]) - .arg("-d") - .arg(base_path) - .spawn() - .unwrap(); + let mut cmd = cmd.args(&["--dev"]).arg("-d").arg(base_path).spawn().unwrap(); // Let it produce some blocks. thread::sleep(Duration::from_secs(30)); diff --git a/substrate/bin/node/cli/tests/export_import_flow.rs b/substrate/bin/node/cli/tests/export_import_flow.rs index 02fba49e834efc4ac61f802d9223c190134cac99..583445434d39165445b90914ba637abad9da4462 100644 --- a/substrate/bin/node/cli/tests/export_import_flow.rs +++ b/substrate/bin/node/cli/tests/export_import_flow.rs @@ -19,9 +19,9 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use std::{process::Command, fs, path::PathBuf}; -use tempfile::{tempdir, TempDir}; use regex::Regex; +use std::{fs, path::PathBuf, process::Command}; +use tempfile::{tempdir, TempDir}; pub mod common; @@ -63,26 +63,23 @@ impl<'a> ExportImportRevertExecutor<'a> { fn new( base_path: &'a TempDir, exported_blocks_file: &'a PathBuf, - db_path: &'a PathBuf + db_path: &'a PathBuf, ) -> Self { - Self { - base_path, - exported_blocks_file, - db_path, - num_exported_blocks: None, - } + Self { base_path, exported_blocks_file, db_path, num_exported_blocks: None } } /// Helper method to run a command. Returns a string corresponding to what has been logged. - fn run_block_command(&self, + fn run_block_command( + &self, sub_command: SubCommand, format_opt: FormatOpt, - expected_to_fail: bool + expected_to_fail: bool, ) -> String { let sub_command_str = sub_command.to_string(); // Adding "--binary" if need be. 
 		let arguments: Vec<&str> = match format_opt {
-			FormatOpt::Binary => vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"],
+			FormatOpt::Binary =>
+				vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"],
 			FormatOpt::Json => vec![&sub_command_str, "--dev", "--pruning", "archive", "-d"],
 		};
 
@@ -94,7 +91,7 @@ impl<'a> ExportImportRevertExecutor<'a> {
 			SubCommand::ImportBlocks => {
 				tmp = tempdir().unwrap();
 				tmp.path()
-			}
+			},
 		};
 
 		// Running the command and capturing the output.
@@ -144,16 +141,13 @@ impl<'a> ExportImportRevertExecutor<'a> {
 		if !expected_to_fail {
 			// Using regex to find out how much block we imported,
 			// and what's the best current block.
-			let re = Regex::new(r"Imported (?P<imported>\d*) blocks. Best: #(?P<best>\d*)").unwrap();
+			let re =
+				Regex::new(r"Imported (?P<imported>\d*) blocks. Best: #(?P<best>\d*)").unwrap();
 			let caps = re.captures(&log).expect("capture should have succeeded");
 			let imported = caps["imported"].parse::<usize>().unwrap();
 			let best = caps["best"].parse::<usize>().unwrap();
 
-			assert_eq!(
-				imported,
-				best,
-				"numbers of blocks imported and best number differs"
-			);
+			assert_eq!(imported, best, "numbers of blocks imported and best number differs");
 			assert_eq!(
 				best,
 				self.num_exported_blocks.expect("number of exported blocks cannot be None; qed"),
@@ -195,11 +189,7 @@ fn export_import_revert() {
 
 	common::run_dev_node_for_a_while(base_path.path());
 
-	let mut executor = ExportImportRevertExecutor::new(
-		&base_path,
-		&exported_blocks_file,
-		&db_path,
-	);
+	let mut executor = ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path);
 
 	// Binary and binary should work.
 	executor.run(FormatOpt::Binary, FormatOpt::Binary, false);
diff --git a/substrate/bin/node/cli/tests/running_the_node_and_interrupt.rs b/substrate/bin/node/cli/tests/running_the_node_and_interrupt.rs
index 05eb9a7027b714667d66d8704f4b6d18a205e835..7a945a30a4166fe1e2b6e27567ecf3cf79128e6e 100644
--- a/substrate/bin/node/cli/tests/running_the_node_and_interrupt.rs
+++ b/substrate/bin/node/cli/tests/running_the_node_and_interrupt.rs
@@ -25,8 +25,13 @@ pub mod common;
 #[test]
 #[cfg(unix)]
 fn running_the_node_works_and_can_be_interrupted() {
-	use nix::sys::signal::{kill, Signal::{self, SIGINT, SIGTERM}};
-	use nix::unistd::Pid;
+	use nix::{
+		sys::signal::{
+			kill,
+			Signal::{self, SIGINT, SIGTERM},
+		},
+		unistd::Pid,
+	};
 
 	fn run_command_and_kill(signal: Signal) {
 		let base_path = tempdir().expect("could not create a temp dir");
diff --git a/substrate/bin/node/cli/tests/telemetry.rs b/substrate/bin/node/cli/tests/telemetry.rs
index 0b90f56a03998509c7c15cbff921ae7cd94dbc4b..78a306284c4acfeacf79b39aa96a4d604643b93f 100644
--- a/substrate/bin/node/cli/tests/telemetry.rs
+++ b/substrate/bin/node/cli/tests/telemetry.rs
@@ -17,10 +17,11 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use assert_cmd::cargo::cargo_bin;
-use nix::sys::signal::{kill, Signal::SIGINT};
-use nix::unistd::Pid;
-use std::convert::TryInto;
-use std::process;
+use nix::{
+	sys::signal::{kill, Signal::SIGINT},
+	unistd::Pid,
+};
+use std::{convert::TryInto, process};
 
 pub mod common;
 pub mod websocket_server;
@@ -45,27 +46,22 @@ async fn telemetry_works() {
 			Event::ConnectionOpen { address } => {
 				println!("New connection from {:?}", address);
 				server.accept();
-			}
+			},
 
 			// Received a message from a connection.
 			Event::BinaryFrame { message, ..
} => { let json: serde_json::Value = serde_json::from_slice(&message).unwrap(); - let object = json - .as_object() - .unwrap() - .get("payload") - .unwrap() - .as_object() - .unwrap(); + let object = + json.as_object().unwrap().get("payload").unwrap().as_object().unwrap(); if matches!(object.get("best"), Some(serde_json::Value::String(_))) { - break; + break } - } + }, Event::TextFrame { .. } => panic!("Got a TextFrame over the socket, this is a bug"), // Connection has been closed. - Event::ConnectionError { .. } => {} + Event::ConnectionError { .. } => {}, } } }); @@ -83,16 +79,11 @@ async fn telemetry_works() { server_task.await; - assert!( - substrate.try_wait().unwrap().is_none(), - "the process should still be running" - ); + assert!(substrate.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process kill(Pid::from_raw(substrate.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut substrate, 40) - .map(|x| x.success()) - .unwrap_or_default()); + assert!(common::wait_for(&mut substrate, 40).map(|x| x.success()).unwrap_or_default()); let output = substrate.wait_with_output().unwrap(); diff --git a/substrate/bin/node/cli/tests/temp_base_path_works.rs b/substrate/bin/node/cli/tests/temp_base_path_works.rs index 0152ddb464dc7bd3cf8d4575af6d86f650e6bf7a..c107740b9b0a539177d7ba4cffc030b7bdada947 100644 --- a/substrate/bin/node/cli/tests/temp_base_path_works.rs +++ b/substrate/bin/node/cli/tests/temp_base_path_works.rs @@ -19,15 +19,19 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use nix::sys::signal::{kill, Signal::SIGINT}; -use nix::unistd::Pid; +use nix::{ + sys::signal::{kill, Signal::SIGINT}, + unistd::Pid, +}; use regex::Regex; -use std::convert::TryInto; -use std::io::Read; -use std::path::PathBuf; -use std::process::{Command, Stdio}; -use std::thread; -use std::time::Duration; +use std::{ + convert::TryInto, + io::Read, + path::PathBuf, + process::{Command, Stdio}, + thread, + time::Duration, +}; pub mod common; @@ -44,29 +48,18 @@ fn temp_base_path_works() { // Let it produce some blocks. 
thread::sleep(Duration::from_secs(30)); - assert!( - cmd.try_wait().unwrap().is_none(), - "the process should still be running" - ); + assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut cmd, 40) - .map(|x| x.success()) - .unwrap_or_default()); + assert!(common::wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); // Ensure the database has been deleted let mut stderr = String::new(); cmd.stderr.unwrap().read_to_string(&mut stderr).unwrap(); let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = PathBuf::from( - re.captures(stderr.as_str()) - .unwrap() - .get(1) - .unwrap() - .as_str() - .to_string(), - ); + let db_path = + PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str().to_string()); assert!(!db_path.exists()); } diff --git a/substrate/bin/node/cli/tests/version.rs b/substrate/bin/node/cli/tests/version.rs index 38e4b1fbda72eae4d5dc483c0da8b8cb39f21b41..5ed3a9a8800c871ddbaef5a6c0f9a2732d15cff0 100644 --- a/substrate/bin/node/cli/tests/version.rs +++ b/substrate/bin/node/cli/tests/version.rs @@ -22,61 +22,45 @@ use regex::Regex; use std::process::Command; fn expected_regex() -> Regex { - Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+|unknown)-(.+?)-(.+?)(?:-(.+))?$").unwrap() + Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+|unknown)-(.+?)-(.+?)(?:-(.+))?$") + .unwrap() } #[test] fn version_is_full() { let expected = expected_regex(); - let output = Command::new(cargo_bin("substrate")) - .args(&["--version"]) - .output() - .unwrap(); + let output = Command::new(cargo_bin("substrate")).args(&["--version"]).output().unwrap(); - assert!( - output.status.success(), - "command returned with non-success exit code" - ); + assert!(output.status.success(), "command returned with non-success exit code"); let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); - let captures = expected - .captures(output.as_str()) - .expect("could not parse version in output"); + let captures = expected.captures(output.as_str()).expect("could not parse version in output"); assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); assert_eq!(&captures[3], TARGET_ARCH.as_str()); assert_eq!(&captures[4], TARGET_OS.as_str()); - assert_eq!( - captures.get(5).map(|x| x.as_str()), - TARGET_ENV.map(|x| x.as_str()) - ); + assert_eq!(captures.get(5).map(|x| x.as_str()), TARGET_ENV.map(|x| x.as_str())); } #[test] fn test_regex_matches_properly() { let expected = expected_regex(); - let captures = expected - .captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu") - .unwrap(); + let captures = expected.captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu").unwrap(); assert_eq!(&captures[1], "2.0.0"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], "x86_64"); assert_eq!(&captures[4], "linux"); assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu") - .unwrap(); + let captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], "x86_64"); assert_eq!(&captures[4], "linux"); assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux") - .unwrap(); + let 
captures = expected.captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux").unwrap(); assert_eq!(&captures[1], "2.0.0-alpha.5"); assert_eq!(&captures[2], "da487d19d"); assert_eq!(&captures[3], "x86_64"); diff --git a/substrate/bin/node/cli/tests/websocket_server.rs b/substrate/bin/node/cli/tests/websocket_server.rs index a8af1c3599521d3e1070df7e496d282a291e3fc6..658b8de4634546bb51b102b3267c59c57002a509 100644 --- a/substrate/bin/node/cli/tests/websocket_server.rs +++ b/substrate/bin/node/cli/tests/websocket_server.rs @@ -116,7 +116,6 @@ impl WsServer { /// # Panic /// /// Panics if no connection is pending. - /// pub fn accept(&mut self) { let pending_incoming = self.pending_incoming.take().expect("no pending socket"); @@ -129,15 +128,10 @@ impl WsServer { }; match server - .send_response(&{ - Response::Accept { - key: &websocket_key, - protocol: None, - } - }) + .send_response(&{ Response::Accept { key: &websocket_key, protocol: None } }) .await { - Ok(()) => {} + Ok(()) => {}, Err(err) => return Err(Box::new(err) as Box<_>), }; @@ -153,7 +147,6 @@ impl WsServer { /// # Panic /// /// Panics if no connection is pending. - /// pub fn reject(&mut self) { let _ = self.pending_incoming.take().expect("no pending socket"); } diff --git a/substrate/bin/node/executor/benches/bench.rs b/substrate/bin/node/executor/benches/bench.rs index d21aedd1d1849438f009ee42f73da1fd1db5e55b..8ac4b90150806e8326962f1398175531319c9e31 100644 --- a/substrate/bin/node/executor/benches/bench.rs +++ b/substrate/bin/node/executor/benches/bench.rs @@ -16,29 +16,33 @@ // limitations under the License. use codec::{Decode, Encode}; -use criterion::{BatchSize, Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use frame_support::Hashable; use node_executor::Executor; use node_primitives::{BlockNumber, Hash}; use node_runtime::{ - Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, + constants::currency::*, Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, + UncheckedExtrinsic, }; -use node_runtime::constants::currency::*; use node_testing::keyring::*; -use sp_core::{NativeOrEncoded, NeverNativeValue}; -use sp_core::storage::well_known_keys; -use sp_core::traits::{CodeExecutor, RuntimeCode}; -use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; -use sc_executor::{NativeExecutor, RuntimeInfo, WasmExecutionMethod, Externalities}; +use sc_executor::{Externalities, NativeExecutor, RuntimeInfo, WasmExecutionMethod}; +use sp_core::{ + storage::well_known_keys, + traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, +}; use sp_runtime::traits::BlakeTwo256; +use sp_state_machine::TestExternalities as CoreTestExternalities; criterion_group!(benches, bench_execute_block); criterion_main!(benches); /// The wasm runtime code. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY.expect( + "Development wasm binary is not available. 
\
+			Testing is only supported with the flag disabled.",
+	)
 }
 
 const GENESIS_HASH: [u8; 32] = [69u8; 32];
 
@@ -66,7 +70,9 @@ fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities(
 	parent_hash: Hash,
 	extrinsics: Vec<CheckedExtrinsic>,
 ) -> (Vec<u8>, Hash) {
-	use sp_trie::{TrieConfiguration, trie_types::Layout};
+	use sp_trie::{trie_types::Layout, TrieConfiguration};
 
 	// sign extrinsics.
 	let extrinsics = extrinsics.into_iter().map(sign).collect::<Vec<_>>();
 
 	// calculate the header fields that we can.
-	let extrinsics_root = Layout::<BlakeTwo256>::ordered_trie_root(
-		extrinsics.iter().map(Encode::encode)
-	).to_fixed_bytes()
-	.into();
+	let extrinsics_root =
+		Layout::<BlakeTwo256>::ordered_trie_root(extrinsics.iter().map(Encode::encode))
+			.to_fixed_bytes()
+			.into();
 
 	let header = Header {
 		parent_hash,
@@ -103,34 +109,44 @@ fn construct_block<E: Externalities>(
 	};
 
 	// execute the block to get the real header.
-	executor.call::<NeverNativeValue, fn() -> _>(
-		ext,
-		&runtime_code,
-		"Core_initialize_block",
-		&header.encode(),
-		true,
-		None,
-	).0.unwrap();
-
-	for i in extrinsics.iter() {
-		executor.call::<NeverNativeValue, fn() -> _>(
+	executor
+		.call::<NeverNativeValue, fn() -> _>(
 			ext,
 			&runtime_code,
-			"BlockBuilder_apply_extrinsic",
-			&i.encode(),
+			"Core_initialize_block",
+			&header.encode(),
 			true,
 			None,
-		).0.unwrap();
+		)
+		.0
+		.unwrap();
+
+	for i in extrinsics.iter() {
+		executor
+			.call::<NeverNativeValue, fn() -> _>(
+				ext,
+				&runtime_code,
+				"BlockBuilder_apply_extrinsic",
+				&i.encode(),
+				true,
+				None,
+			)
+			.0
+			.unwrap();
 	}
 
-	let header = match executor.call::<NeverNativeValue, fn() -> _>(
-		ext,
-		&runtime_code,
-		"BlockBuilder_finalize_block",
-		&[0u8;0],
-		true,
-		None,
-	).0.unwrap() {
+	let header = match executor
+		.call::<NeverNativeValue, fn() -> _>(
+			ext,
+			&runtime_code,
+			"BlockBuilder_finalize_block",
+			&[0u8; 0],
+			true,
+			None,
+		)
+		.0
+		.unwrap()
+	{
 		NativeOrEncoded::Native(_) => unreachable!(),
 		NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(),
 	};
@@ -139,29 +155,21 @@ fn construct_block<E: Externalities>(
 	(Block { header, extrinsics }.encode(), hash.into())
 }
 
-fn test_blocks(genesis_config: &GenesisConfig, executor: &NativeExecutor<Executor>)
-	-> Vec<(Vec<u8>, Hash)>
-{
+fn test_blocks(
+	genesis_config: &GenesisConfig,
+	executor: &NativeExecutor<Executor>,
+) -> Vec<(Vec<u8>, Hash)> {
 	let mut test_ext = new_test_ext(genesis_config);
-	let mut block1_extrinsics = vec![
-		CheckedExtrinsic {
-			signed: None,
-			function: Call::Timestamp(pallet_timestamp::Call::set(0)),
-		},
-	];
-	block1_extrinsics.extend((0..20).map(|i| {
-		CheckedExtrinsic {
-			signed: Some((alice(), signed_extra(i, 0))),
-			function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)),
-		}
+	let mut block1_extrinsics = vec![CheckedExtrinsic {
+		signed: None,
+		function: Call::Timestamp(pallet_timestamp::Call::set(0)),
+	}];
+	block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic {
+		signed: Some((alice(), signed_extra(i, 0))),
+		function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)),
 	}));
-	let block1 = construct_block(
-		executor,
-		&mut test_ext.ext(),
-		1,
-		GENESIS_HASH.into(),
-		block1_extrinsics,
-	);
+	let block1 =
+		construct_block(executor, &mut test_ext.ext(), 1, GENESIS_HASH.into(), block1_extrinsics);
 
 	vec![block1]
 }
@@ -176,47 +184,47 @@ fn bench_execute_block(c: &mut Criterion) {
 	];
 
 	for strategy in execution_methods {
-		group.bench_function(
-			format!("{:?}", strategy),
-			|b| {
-				let genesis_config = node_testing::genesis::config(false, Some(compact_code_unwrap()));
-				let (use_native, wasm_method) = match strategy {
-					ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted),
-					ExecutionMethod::Wasm(wasm_method) => (false, wasm_method),
-				};
-
-				let executor = NativeExecutor::new(wasm_method, None, 8);
-				let runtime_code = RuntimeCode {
-					code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()),
-					hash: vec![1, 2, 3],
-					heap_pages: None,
-				};
-
-				// Get the runtime version to initialize the runtimes cache.
-				{
-					let mut test_ext = new_test_ext(&genesis_config);
-					executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap();
-				}
-
-				let blocks = test_blocks(&genesis_config, &executor);
-
-				b.iter_batched_ref(
-					|| new_test_ext(&genesis_config),
-					|test_ext| {
-						for block in blocks.iter() {
-							executor.call::<NeverNativeValue, fn() -> _>(
+		group.bench_function(format!("{:?}", strategy), |b| {
+			let genesis_config = node_testing::genesis::config(false, Some(compact_code_unwrap()));
+			let (use_native, wasm_method) = match strategy {
+				ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted),
+				ExecutionMethod::Wasm(wasm_method) => (false, wasm_method),
+			};
+
+			let executor = NativeExecutor::new(wasm_method, None, 8);
+			let runtime_code = RuntimeCode {
+				code_fetcher: &sp_core::traits::WrappedRuntimeCode(compact_code_unwrap().into()),
+				hash: vec![1, 2, 3],
+				heap_pages: None,
+			};
+
+			// Get the runtime version to initialize the runtimes cache.
+			{
+				let mut test_ext = new_test_ext(&genesis_config);
+				executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap();
+			}
+
+			let blocks = test_blocks(&genesis_config, &executor);
+
+			b.iter_batched_ref(
+				|| new_test_ext(&genesis_config),
+				|test_ext| {
+					for block in blocks.iter() {
+						executor
+							.call::<NeverNativeValue, fn() -> _>(
 								&mut test_ext.ext(),
 								&runtime_code,
 								"Core_execute_block",
 								&block.0,
 								use_native,
 								None,
-							).0.unwrap();
-						}
-					},
-					BatchSize::LargeInput,
-				);
-			},
-		);
+							)
+							.0
+							.unwrap();
+					}
+				},
+				BatchSize::LargeInput,
+			);
+		});
 	}
 }
diff --git a/substrate/bin/node/executor/src/lib.rs b/substrate/bin/node/executor/src/lib.rs
index e7fb09a19c514f6d9773c71137da8d3aec16886b..0f4bfcf2eee264328e331bc724fe1b1acc83d694 100644
--- a/substrate/bin/node/executor/src/lib.rs
+++ b/substrate/bin/node/executor/src/lib.rs
@@ -18,8 +18,8 @@
 //! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be
 //! executed is equivalent to the natively compiled code.
 
-pub use sc_executor::NativeExecutor;
 use sc_executor::native_executor_instance;
+pub use sc_executor::NativeExecutor;
 
 // Declare an instance of the native executor named `Executor`. Include the wasm binary as the
 // equivalent wasm code.
diff --git a/substrate/bin/node/executor/tests/basic.rs b/substrate/bin/node/executor/tests/basic.rs
index 4e173667959090c058bee83186b895dc55ef7e69..f3beb93f598bb554d72998fda0aed3d1edced7e3 100644
--- a/substrate/bin/node/executor/tests/basic.rs
+++ b/substrate/bin/node/executor/tests/basic.rs
@@ -15,30 +15,28 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-use codec::{Encode, Decode, Joiner}; +use codec::{Decode, Encode, Joiner}; use frame_support::{ traits::Currency, - weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; -use sp_core::{NeverNativeValue, traits::Externalities, storage::well_known_keys}; +use frame_system::{self, AccountInfo, EventRecord, Phase}; +use sp_core::{storage::well_known_keys, traits::Externalities, NeverNativeValue}; use sp_runtime::{ - ApplyExtrinsicResult, - traits::Hash as HashT, - transaction_validity::InvalidTransaction, + traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult, }; -use frame_system::{self, EventRecord, Phase, AccountInfo}; +use node_primitives::{Balance, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, - System, TransactionPayment, Event, - constants::{time::SLOT_DURATION, currency::*}, + constants::{currency::*, time::SLOT_DURATION}, + Balances, Block, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, + UncheckedExtrinsic, }; -use node_primitives::{Balance, Hash}; -use wat; use node_testing::keyring::*; +use wat; pub mod common; -use self::common::{*, sign}; +use self::common::{sign, *}; /// The wasm runtime binary which hasn't undergone the compacting process. /// @@ -46,8 +44,10 @@ use self::common::{*, sign}; /// have to execute provided wasm code instead of the native equivalent. This trick is used to /// test code paths that differ between native and wasm versions. pub fn bloaty_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY_BLOATY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY_BLOATY.expect( + "Development wasm binary is not available. \ + Testing is only supported with the flag disabled.", + ) } /// Default transfer fee. This will use the same logic that is implemented in transaction-payment module. 
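Editor's note: the `transfer_fee` helper documented just above derives the expected fee the same way the transaction-payment pallet does. As a rough, self-contained sketch of that logic (ignoring the fee multiplier and any tip; all names and numbers below are illustrative, none come from this diff):

```rust
// Hypothetical illustration only. With an `IdentityFee` weight-to-fee curve,
// weight converts 1:1 into fee units, so the inclusion fee is simply:
//   base fee + per-byte length fee + dispatch weight fee
fn sketch_inclusion_fee(base_weight: u64, encoded_len: u64, byte_fee: u64, weight: u64) -> u64 {
    let base_fee = base_weight; // ExtrinsicBaseWeight mapped through IdentityFee
    let length_fee = encoded_len * byte_fee; // TransactionByteFee * encoded length
    let weight_fee = weight; // dispatch weight mapped through IdentityFee
    base_fee + length_fee + weight_fee
}

fn main() {
    // Illustrative numbers, not the runtime's real constants.
    println!("fee = {}", sketch_inclusion_fee(125_000_000, 143, 10_000_000, 195_000_000));
}
```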
@@ -87,7 +87,10 @@ fn changes_trie_block() -> (Vec, Hash) { }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 69 * DOLLARS, + )), }, ], (time / SLOT_DURATION).into(), @@ -111,7 +114,10 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 69 * DOLLARS, + )), }, ], (time1 / SLOT_DURATION).into(), @@ -128,12 +134,18 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(alice().into(), 5 * DOLLARS)), + function: Call::Balances(pallet_balances::Call::transfer( + alice().into(), + 5 * DOLLARS, + )), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(1, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 15 * DOLLARS)), - } + function: Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 15 * DOLLARS, + )), + }, ], (time2 / SLOT_DURATION).into(), ); @@ -158,7 +170,7 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { CheckedExtrinsic { signed: Some((alice(), signed_extra(nonce, 0))), function: Call::System(frame_system::Call::remark(vec![0; size])), - } + }, ], (time * 1000 / SLOT_DURATION).into(), ) @@ -169,7 +181,7 @@ fn panic_execution_with_foreign_code_gives_error() { let mut t = new_test_ext(bloaty_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (69u128, 0u32, 0u128, 0u128, 0u128).encode() + (69u128, 0u32, 0u128, 0u128, 0u128).encode(), ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -180,7 +192,8 @@ fn panic_execution_with_foreign_code_gives_error() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let v = executor_call:: _>( &mut t, @@ -188,7 +201,9 @@ fn panic_execution_with_foreign_code_gives_error() { &vec![].and(&xt()), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -198,7 +213,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { let mut t = new_test_ext(compact_code_unwrap(), false); t.insert( >::hashed_key_for(alice()), - (0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode() + (0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode(), ); t.insert(>::hashed_key().to_vec(), 69_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -209,7 +224,8 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let v = executor_call:: _>( &mut t, @@ -217,7 +233,9 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { &vec![].and(&xt()), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -229,19 +247,21 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { >::hashed_key_for(alice()), AccountInfo::<::Index, _> { data: (111 * DOLLARS, 0u128, 0u128, 0u128), - .. 
Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), AccountInfo::<::Index, _> { data: (0 * DOLLARS, 0u128, 0u128, 0u128), - .. Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -251,7 +271,8 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); @@ -262,7 +283,8 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { &vec![].and(&xt()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -278,19 +300,21 @@ fn successful_execution_with_foreign_code_gives_ok() { >::hashed_key_for(alice()), AccountInfo::<::Index, _> { data: (111 * DOLLARS, 0u128, 0u128, 0u128), - .. Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), AccountInfo::<::Index, _> { data: (0 * DOLLARS, 0u128, 0u128, 0u128), - .. Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -300,7 +324,8 @@ fn successful_execution_with_foreign_code_gives_ok() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let fees = t.execute_with(|| transfer_fee(&xt())); @@ -311,7 +336,8 @@ fn successful_execution_with_foreign_code_gives_ok() { &vec![].and(&xt()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -330,7 +356,9 @@ fn full_native_block_import_works() { let mut fees = t.execute_with(|| transfer_fee(&xt())); let transfer_weight = default_transfer_call().get_dispatch_info().weight; - let timestamp_weight = pallet_timestamp::Call::set::(Default::default()).get_dispatch_info().weight; + let timestamp_weight = pallet_timestamp::Call::set::(Default::default()) + .get_dispatch_info() + .weight; executor_call:: _>( &mut t, @@ -338,7 +366,9 @@ fn full_native_block_import_works() { &block1.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -347,9 +377,11 @@ fn full_native_block_import_works() { let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::System(frame_system::Event::ExtrinsicSuccess( - DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: timestamp_weight, + class: DispatchClass::Mandatory, + ..Default::default() + })), topics: vec![], }, EventRecord { @@ -368,9 +400,10 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::System(frame_system::Event::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, ]; @@ -385,34 +418,33 @@ fn full_native_block_import_works() { &block2.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), alice_last_known_balance - 10 * DOLLARS - fees, ); - assert_eq!( - 
Balances::total_balance(&bob()), - 179 * DOLLARS - fees, - ); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees,); let events = vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: Event::System(frame_system::Event::ExtrinsicSuccess( - DispatchInfo { weight: timestamp_weight, class: DispatchClass::Mandatory, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: timestamp_weight, + class: DispatchClass::Mandatory, + ..Default::default() + })), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::Balances( - pallet_balances::Event::Transfer( - bob().into(), - alice().into(), - 5 * DOLLARS, - ) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + bob().into(), + alice().into(), + 5 * DOLLARS, + )), topics: vec![], }, EventRecord { @@ -422,20 +454,19 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::System(frame_system::Event::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::Balances( - pallet_balances::Event::Transfer( - alice().into(), - bob().into(), - 15 * DOLLARS, - ) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + alice().into(), + bob().into(), + 15 * DOLLARS, + )), topics: vec![], }, EventRecord { @@ -445,9 +476,10 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::System(frame_system::Event::ExtrinsicSuccess( - DispatchInfo { weight: transfer_weight, ..Default::default() } - )), + event: Event::System(frame_system::Event::ExtrinsicSuccess(DispatchInfo { + weight: transfer_weight, + ..Default::default() + })), topics: vec![], }, ]; @@ -470,7 +502,9 @@ fn full_wasm_block_import_works() { &block1.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); @@ -486,17 +520,16 @@ fn full_wasm_block_import_works() { &block2.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), alice_last_known_balance - 10 * DOLLARS - fees, ); - assert_eq!( - Balances::total_balance(&bob()), - 179 * DOLLARS - 1 * fees, - ); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees,); }); } @@ -600,11 +633,7 @@ fn deploying_wasm_contract_should_work() { let transfer_code = wat::parse_str(CODE_TRANSFER).unwrap(); let transfer_ch = ::Hashing::hash(&transfer_code); - let addr = pallet_contracts::Pallet::::contract_address( - &charlie(), - &transfer_ch, - &[], - ); + let addr = pallet_contracts::Pallet::::contract_address(&charlie(), &transfer_ch, &[]); let subsistence = pallet_contracts::Pallet::::subsistence_threshold(); @@ -627,19 +656,17 @@ fn deploying_wasm_contract_should_work() { transfer_code, Vec::new(), Vec::new(), - ) + ), ), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), - function: Call::Contracts( - pallet_contracts::Call::call::( - sp_runtime::MultiAddress::Id(addr.clone()), - 10, - 500_000_000, - vec![0x00, 0x01, 0x02, 0x03] - ) - ), + function: Call::Contracts(pallet_contracts::Call::call::( + sp_runtime::MultiAddress::Id(addr.clone()), + 10, + 500_000_000, + vec![0x00, 0x01, 0x02, 0x03], + )), }, ], (time / 
SLOT_DURATION).into(), @@ -647,20 +674,14 @@ fn deploying_wasm_contract_should_work() { let mut t = new_test_ext(compact_code_unwrap(), false); - executor_call:: _>( - &mut t, - "Core_execute_block", - &b.0, - false, - None, - ).0.unwrap(); + executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) + .0 + .unwrap(); t.execute_with(|| { // Verify that the contract does exist by querying some of its storage items // It does not matter that the storage item itself does not exist. - assert!( - &pallet_contracts::Pallet::::get_storage(addr, Default::default()).is_ok() - ); + assert!(&pallet_contracts::Pallet::::get_storage(addr, Default::default()).is_ok()); }); } @@ -676,7 +697,8 @@ fn wasm_big_block_import_fails() { &block_with_size(42, 0, 120_000).0, false, None, - ).0; + ) + .0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -690,7 +712,9 @@ fn native_big_block_import_succeeds() { &block_with_size(42, 0, 120_000).0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); } #[test] @@ -700,15 +724,15 @@ fn native_big_block_import_fails_on_fallback() { // We set the heap pages to 8 because we know that should give an OOM in WASM with the given block. set_heap_pages(&mut t.ext(), 8); - assert!( - executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ).0.is_err() - ); + assert!(executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0 + .is_err()); } #[test] @@ -718,8 +742,9 @@ fn panic_execution_gives_error() { >::hashed_key_for(alice()), AccountInfo::<::Index, _> { data: (0 * DOLLARS, 0u128, 0u128, 0u128), - .. Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert(>::hashed_key().to_vec(), 0_u128.encode()); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -730,7 +755,8 @@ fn panic_execution_gives_error() { &vec![].and(&from_block_number(1u32)), false, None, - ).0; + ) + .0; assert!(r.is_ok()); let r = executor_call:: _>( &mut t, @@ -738,7 +764,10 @@ fn panic_execution_gives_error() { &vec![].and(&xt()), false, None, - ).0.unwrap().into_encoded(); + ) + .0 + .unwrap() + .into_encoded(); let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -750,19 +779,21 @@ fn successful_execution_gives_ok() { >::hashed_key_for(alice()), AccountInfo::<::Index, _> { data: (111 * DOLLARS, 0u128, 0u128, 0u128), - .. Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key_for(bob()), AccountInfo::<::Index, _> { data: (0 * DOLLARS, 0u128, 0u128, 0u128), - .. 
Default::default() - }.encode(), + ..Default::default() + } + .encode(), ); t.insert( >::hashed_key().to_vec(), - (111 * DOLLARS).encode() + (111 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -772,7 +803,8 @@ fn successful_execution_gives_ok() { &vec![].and(&from_block_number(1u32)), false, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); @@ -786,7 +818,10 @@ fn successful_execution_gives_ok() { &vec![].and(&xt()), false, None, - ).0.unwrap().into_encoded(); + ) + .0 + .unwrap() + .into_encoded(); ApplyExtrinsicResult::decode(&mut &r[..]) .unwrap() .expect("Extrinsic could not be applied") @@ -811,7 +846,9 @@ fn full_native_block_import_works_with_changes_trie() { &block.encode(), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); } @@ -827,7 +864,9 @@ fn full_wasm_block_import_works_with_changes_trie() { &block1.0, false, None, - ).0.unwrap(); + ) + .0 + .unwrap(); assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); } @@ -835,8 +874,7 @@ fn full_wasm_block_import_works_with_changes_trie() { #[test] fn should_import_block_with_test_client() { use node_testing::client::{ - ClientBlockImportExt, TestClientBuilderExt, TestClientBuilder, - sp_consensus::BlockOrigin, + sp_consensus::BlockOrigin, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; let mut client = TestClientBuilder::new().build(); diff --git a/substrate/bin/node/executor/tests/common.rs b/substrate/bin/node/executor/tests/common.rs index 635155b5d00b2fd59e1449c455228bc2105b6d21..414b335406be87d1f42810a352c75bbe60ccb5b9 100644 --- a/substrate/bin/node/executor/tests/common.rs +++ b/substrate/bin/node/executor/tests/common.rs @@ -15,34 +15,32 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use codec::{Encode, Decode}; -use frame_system::offchain::AppCrypto; +use codec::{Decode, Encode}; use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_consensus_babe::{BABE_ENGINE_ID, Slot, digests::{PreDigest, SecondaryPlainPreDigest}}; +use frame_system::offchain::AppCrypto; +use sc_executor::{error::Result, NativeExecutor, WasmExecutionMethod}; +use sp_consensus_babe::{ + digests::{PreDigest, SecondaryPlainPreDigest}, + Slot, BABE_ENGINE_ID, +}; use sp_core::{ - NeverNativeValue, NativeOrEncoded, crypto::KeyTypeId, sr25519::Signature, traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::{ - ApplyExtrinsicResult, - MultiSigner, - MultiSignature, - Digest, - DigestItem, - traits::{Header as HeaderT, BlakeTwo256}, + traits::{BlakeTwo256, Header as HeaderT}, + ApplyExtrinsicResult, Digest, DigestItem, MultiSignature, MultiSigner, }; -use sc_executor::{NativeExecutor, WasmExecutionMethod}; -use sc_executor::error::Result; +use sp_state_machine::TestExternalities as CoreTestExternalities; use node_executor::Executor; +use node_primitives::{BlockNumber, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Runtime, BuildStorage, - constants::currency::*, + constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, + UncheckedExtrinsic, }; -use node_primitives::{Hash, BlockNumber}; use node_testing::keyring::*; use sp_externalities::Externalities; @@ -50,8 +48,8 @@ pub const TEST_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"test"); pub mod sr25519 { mod app_sr25519 { - use sp_application_crypto::{app_crypto, sr25519}; use super::super::TEST_KEY_TYPE_ID; + use sp_application_crypto::{app_crypto, sr25519}; app_crypto!(sr25519, TEST_KEY_TYPE_ID); } @@ -72,8 +70,10 @@ impl AppCrypto for TestAuthorityId { /// as canonical. This is why `native_executor_instance` also uses the compact version of the /// runtime. pub fn compact_code_unwrap() -> &'static [u8] { - node_runtime::WASM_BINARY.expect("Development wasm binary is not available. \ - Testing is only supported with the flag disabled.") + node_runtime::WASM_BINARY.expect( + "Development wasm binary is not available. 
\ + Testing is only supported with the flag disabled.", + ) } pub const GENESIS_HASH: [u8; 32] = [69u8; 32]; @@ -101,8 +101,9 @@ pub fn executor() -> NativeExecutor { } pub fn executor_call< - R:Decode + Encode + PartialEq, - NC: FnOnce() -> std::result::Result> + std::panic::UnwindSafe + R: Decode + Encode + PartialEq, + NC: FnOnce() -> std::result::Result> + + std::panic::UnwindSafe, >( t: &mut TestExternalities, method: &str, @@ -120,20 +121,15 @@ pub fn executor_call< heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), }; - executor().call::( - &mut t, - &runtime_code, - method, - data, - use_native, - native_call, - ) + executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) } pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { let mut ext = TestExternalities::new_with_code( code, - node_testing::genesis::config(support_changes_trie, Some(code)).build_storage().unwrap(), + node_testing::genesis::config(support_changes_trie, Some(code)) + .build_storage() + .unwrap(), ); ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default()); ext @@ -150,7 +146,7 @@ pub fn construct_block( extrinsics: Vec, babe_slot: Slot, ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; + use sp_trie::{trie_types::Layout, TrieConfiguration}; // sign extrinsics. let extrinsics = extrinsics.into_iter().map(sign).collect::>(); @@ -167,15 +163,14 @@ pub fn construct_block( extrinsics_root, state_root: Default::default(), digest: Digest { - logs: vec![ - DigestItem::PreRuntime( - BABE_ENGINE_ID, - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot: babe_slot, - authority_index: 42, - }).encode() - ), - ], + logs: vec![DigestItem::PreRuntime( + BABE_ENGINE_ID, + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot: babe_slot, + authority_index: 42, + }) + .encode(), + )], }, }; @@ -186,7 +181,9 @@ pub fn construct_block( &header.encode(), true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); for extrinsic in extrinsics.iter() { // Try to apply the `extrinsic`. 
It should be valid, in the sense that it passes @@ -197,8 +194,13 @@ pub fn construct_block( &extrinsic.encode(), true, None, - ).0.expect("application of an extrinsic failed").into_encoded(); - match ApplyExtrinsicResult::decode(&mut &r[..]).expect("apply result deserialization failed") { + ) + .0 + .expect("application of an extrinsic failed") + .into_encoded(); + match ApplyExtrinsicResult::decode(&mut &r[..]) + .expect("apply result deserialization failed") + { Ok(_) => {}, Err(e) => panic!("Applying extrinsic failed: {:?}", e), } @@ -207,10 +209,13 @@ pub fn construct_block( let header = match executor_call:: _>( env, "BlockBuilder_finalize_block", - &[0u8;0], + &[0u8; 0], true, None, - ).0.unwrap() { + ) + .0 + .unwrap() + { NativeOrEncoded::Native(_) => unreachable!(), NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), }; diff --git a/substrate/bin/node/executor/tests/fees.rs b/substrate/bin/node/executor/tests/fees.rs index 5474adbd32a8900a28a65c11a0d4982ce53c6590..3bc9179da2b3d1f7d62a802e583c6d2fb159762c 100644 --- a/substrate/bin/node/executor/tests/fees.rs +++ b/substrate/bin/node/executor/tests/fees.rs @@ -18,20 +18,21 @@ use codec::{Encode, Joiner}; use frame_support::{ traits::Currency, - weights::{GetDispatchInfo, constants::ExtrinsicBaseWeight, IdentityFee, WeightToFeePolynomial}, + weights::{ + constants::ExtrinsicBaseWeight, GetDispatchInfo, IdentityFee, WeightToFeePolynomial, + }, }; -use sp_core::NeverNativeValue; -use sp_runtime::{Perbill, traits::One}; +use node_primitives::Balance; use node_runtime::{ - CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, Multiplier, - TransactionByteFee, - constants::{time::SLOT_DURATION, currency::*}, + constants::{currency::*, time::SLOT_DURATION}, + Balances, Call, CheckedExtrinsic, Multiplier, Runtime, TransactionByteFee, TransactionPayment, }; -use node_primitives::Balance; use node_testing::keyring::*; +use sp_core::NeverNativeValue; +use sp_runtime::{traits::One, Perbill}; pub mod common; -use self::common::{*, sign}; +use self::common::{sign, *}; #[test] fn fee_multiplier_increases_and_decreases_on_big_weight() { @@ -60,7 +61,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(60))), - } + }, ], (time1 / SLOT_DURATION).into(), ); @@ -79,7 +80,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), function: Call::System(frame_system::Call::remark(vec![0; 1])), - } + }, ], (time2 / SLOT_DURATION).into(), ); @@ -97,7 +98,9 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { &block1.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); // weight multiplier is increased for next block. t.execute_with(|| { @@ -114,7 +117,9 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { &block2.0, true, None, - ).0.unwrap(); + ) + .0 + .unwrap(); // weight multiplier is increased for next block. 
t.execute_with(|| { @@ -131,7 +136,8 @@ fn new_account_info(free_dollars: u128) -> Vec { providers: 0, sufficients: 0, data: (free_dollars * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS), - }.encode() + } + .encode() } #[test] @@ -148,7 +154,7 @@ fn transaction_fee_is_correct() { t.insert(>::hashed_key_for(bob()), new_account_info(10)); t.insert( >::hashed_key().to_vec(), - (110 * DOLLARS).encode() + (110 * DOLLARS).encode(), ); t.insert(>::hashed_key_for(0), vec![0u8; 32]); @@ -164,7 +170,8 @@ fn transaction_fee_is_correct() { &vec![].and(&from_block_number(1u32)), true, None, - ).0; + ) + .0; assert!(r.is_ok()); let r = executor_call:: _>( @@ -173,7 +180,8 @@ fn transaction_fee_is_correct() { &vec![].and(&xt.clone()), true, None, - ).0; + ) + .0; assert!(r.is_ok()); t.execute_with(|| { @@ -228,15 +236,20 @@ fn block_weight_capacity_report() { loop { let num_transfers = block_number * factor; - let mut xts = (0..num_transfers).map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), - }).collect::>(); - - xts.insert(0, CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), - }); + let mut xts = (0..num_transfers) + .map(|i| CheckedExtrinsic { + signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), + function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), + }) + .collect::>(); + + xts.insert( + 0, + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + }, + ); // NOTE: this is super slow. Can probably be improved. let block = construct_block( @@ -262,7 +275,8 @@ fn block_weight_capacity_report() { &block.0, true, None, - ).0; + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); @@ -307,7 +321,11 @@ fn block_length_capacity_report() { }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark(vec![0u8; (block_number * factor) as usize])), + function: Call::System(frame_system::Call::remark(vec![ + 0u8; + (block_number * factor) + as usize + ])), }, ], (time * 1000 / SLOT_DURATION).into(), @@ -327,7 +345,8 @@ fn block_length_capacity_report() { &block.0, true, None, - ).0; + ) + .0; println!(" || Result = {:?}", r); assert!(r.is_ok()); diff --git a/substrate/bin/node/executor/tests/submit_transaction.rs b/substrate/bin/node/executor/tests/submit_transaction.rs index 590bdac4db757c784033f3d41bf5b6540fcc01c6..c83e48c8c933b157eea8da24238c8fad07c339c9 100644 --- a/substrate/bin/node/executor/tests/submit_transaction.rs +++ b/substrate/bin/node/executor/tests/submit_transaction.rs @@ -15,26 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::sync::Arc; -use node_runtime::{ - Executive, Indices, Runtime, UncheckedExtrinsic, -}; -use sp_application_crypto::AppKey; -use sp_core::{ - offchain::{ - TransactionPoolExt, - testing::TestTransactionPoolExt, - }, -}; -use sp_keystore::{KeystoreExt, SyncCryptoStore, testing::KeyStore}; -use frame_system::{ - offchain::{ - Signer, - SubmitTransaction, - SendSignedTransaction, - } -}; use codec::Decode; +use frame_system::offchain::{SendSignedTransaction, Signer, SubmitTransaction}; +use node_runtime::{Executive, Indices, Runtime, UncheckedExtrinsic}; +use sp_application_crypto::AppKey; +use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; +use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; +use std::sync::Arc; pub mod common; use self::common::*; @@ -56,8 +43,10 @@ fn should_submit_unsigned_transaction() { }; let call = pallet_im_online::Call::heartbeat(heartbeat_data, signature); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) - .unwrap(); + SubmitTransaction::>::submit_unsigned_transaction( + call.into(), + ) + .unwrap(); assert_eq!(state.read().transactions.len(), 1) }); @@ -75,23 +64,26 @@ fn should_submit_signed_transaction() { SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter2", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter3", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter3", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let results = Signer::::all_accounts() - .send_signed_transaction(|_| { + let results = + Signer::::all_accounts().send_signed_transaction(|_| { pallet_balances::Call::transfer(Default::default(), Default::default()) }); @@ -112,18 +104,20 @@ fn should_submit_signed_twice_from_the_same_account() { SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); SyncCryptoStore::sr25519_generate_new( &keystore, sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter2", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let result = Signer::::any_account() - .send_signed_transaction(|_| { + let result = + Signer::::any_account().send_signed_transaction(|_| { pallet_balances::Call::transfer(Default::default(), Default::default()) }); @@ -131,8 +125,8 @@ fn should_submit_signed_twice_from_the_same_account() { assert_eq!(state.read().transactions.len(), 1); // submit another one from the same account. The nonce should be incremented. 
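Editor's note: the comment above captures the behaviour the next assertion checks: every signed submission from the same account consumes one nonce. A minimal stand-in (a hypothetical mock, not the keystore-backed `Signer`) makes that bookkeeping explicit:

```rust
// Hypothetical mock that models only the nonce bookkeeping the test asserts on.
struct MockSigner {
    nonce: u32,
}

impl MockSigner {
    // Each signed transaction is stamped with the current nonce, which then advances.
    fn sign_next(&mut self) -> u32 {
        let stamped = self.nonce;
        self.nonce += 1;
        stamped
    }
}

fn main() {
    let mut signer = MockSigner { nonce: 0 };
    let (nonce1, nonce2) = (signer.sign_next(), signer.sign_next());
    // Mirrors the test's `assert!(nonce1 != nonce2, ...)`.
    assert_ne!(nonce1, nonce2);
}
```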
- let result = Signer::::any_account() - .send_signed_transaction(|_| { + let result = + Signer::::any_account().send_signed_transaction(|_| { pallet_balances::Call::transfer(Default::default(), Default::default()) }); @@ -147,10 +141,7 @@ fn should_submit_signed_twice_from_the_same_account() { } let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); - assert!( - nonce1 != nonce2, - "Transactions should have different nonces. Got: {:?}", nonce1 - ); + assert!(nonce1 != nonce2, "Transactions should have different nonces. Got: {:?}", nonce1); }); } @@ -161,14 +152,12 @@ fn should_submit_signed_twice_from_all_accounts() { t.register_extension(TransactionPoolExt::new(pool)); let keystore = KeyStore::new(); - keystore.sr25519_generate_new( - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); - keystore.sr25519_generate_new( - sr25519::AuthorityId::ID, - Some(&format!("{}/hunter2", PHRASE)) - ).unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + keystore + .sr25519_generate_new(sr25519::AuthorityId::ID, Some(&format!("{}/hunter2", PHRASE))) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { @@ -217,8 +206,10 @@ fn should_submit_signed_twice_from_all_accounts() { #[test] fn submitted_transaction_should_be_valid() { use codec::Encode; - use sp_runtime::transaction_validity::{TransactionSource, TransactionTag}; - use sp_runtime::traits::StaticLookup; + use sp_runtime::{ + traits::StaticLookup, + transaction_validity::{TransactionSource, TransactionTag}, + }; let mut t = new_test_ext(compact_code_unwrap(), false); let (pool, state) = TestTransactionPoolExt::new(); @@ -227,13 +218,15 @@ fn submitted_transaction_should_be_valid() { let keystore = KeyStore::new(); SyncCryptoStore::sr25519_generate_new( &keystore, - sr25519::AuthorityId::ID, Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + sr25519::AuthorityId::ID, + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); t.register_extension(KeystoreExt(Arc::new(keystore))); t.execute_with(|| { - let results = Signer::::all_accounts() - .send_signed_transaction(|_| { + let results = + Signer::::all_accounts().send_signed_transaction(|_| { pallet_balances::Call::transfer(Default::default(), Default::default()) }); let len = results.len(); @@ -252,7 +245,7 @@ fn submitted_transaction_should_be_valid() { let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { data, .. Default::default() }; + let account = frame_system::AccountInfo { data, ..Default::default() }; >::insert(&address, account); // check validity @@ -260,7 +253,8 @@ fn submitted_transaction_should_be_valid() { source, extrinsic, frame_system::BlockHash::::get(0), - ).unwrap(); + ) + .unwrap(); // We ignore res.priority since this number can change based on updates to weights and such. assert_eq!(res.requires, Vec::::new()); diff --git a/substrate/bin/node/inspect/src/cli.rs b/substrate/bin/node/inspect/src/cli.rs index abdbedc296d022ab65c3bff12955abd6710a423e..c054fedaf57c45bc5a2ada6d71c5286c72638b46 100644 --- a/substrate/bin/node/inspect/src/cli.rs +++ b/substrate/bin/node/inspect/src/cli.rs @@ -18,8 +18,8 @@ //! Structs to easily compose inspect sub-command for CLI. 
-use std::fmt::Debug; use sc_cli::{ImportParams, SharedParams}; +use std::fmt::Debug; use structopt::StructOpt; /// The `inspect` command used to print decoded chain data. diff --git a/substrate/bin/node/inspect/src/command.rs b/substrate/bin/node/inspect/src/command.rs index 9c14a71375f5f188290d5e8393fcb153ac0e4ed8..a2c63d684bf96418e381c4be516aa143e5ee004e 100644 --- a/substrate/bin/node/inspect/src/command.rs +++ b/substrate/bin/node/inspect/src/command.rs @@ -18,8 +18,10 @@ //! Command run by the CLI -use crate::cli::{InspectCmd, InspectSubCmd}; -use crate::Inspector; +use crate::{ + cli::{InspectCmd, InspectSubCmd}, + Inspector, +}; use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; use sc_service::{new_full_client, Configuration, NativeExecutionDispatch}; use sp_runtime::traits::Block; @@ -43,13 +45,13 @@ impl InspectCmd { let res = inspect.block(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - } + }, InspectSubCmd::Extrinsic { input } => { let input = input.parse()?; let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - } + }, } } } diff --git a/substrate/bin/node/inspect/src/lib.rs b/substrate/bin/node/inspect/src/lib.rs index 3abb9e9ff41e7c8a519e8d5b0843f6618280f2d7..30e7250ea2c6cef6ecde923527566f42d48514ed 100644 --- a/substrate/bin/node/inspect/src/lib.rs +++ b/substrate/bin/node/inspect/src/lib.rs @@ -27,33 +27,27 @@ pub mod cli; pub mod command; -use std::{ - fmt, - fmt::Debug, - marker::PhantomData, - str::FromStr, -}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::BlockBackend; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ generic::BlockId, - traits::{Block, HashFor, NumberFor, Hash} + traits::{Block, Hash, HashFor, NumberFor}, }; +use std::{fmt, fmt::Debug, marker::PhantomData, str::FromStr}; /// A helper type for a generic block input. -pub type BlockAddressFor = BlockAddress< - as Hash>::Output, - NumberFor ->; +pub type BlockAddressFor = + BlockAddress< as Hash>::Output, NumberFor>; /// A Pretty formatter implementation. pub trait PrettyPrinter { /// Nicely format block. fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result; /// Nicely format extrinsic. - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result; + fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) + -> fmt::Result; } /// Default dummy debug printer. @@ -72,7 +66,11 @@ impl PrettyPrinter for DebugPrinter { Ok(()) } - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result { + fn fmt_extrinsic( + &self, + fmt: &mut fmt::Formatter, + extrinsic: &TBlock::Extrinsic, + ) -> fmt::Result { writeln!(fmt, " {:#?}", extrinsic)?; writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; Ok(()) @@ -101,15 +99,14 @@ impl std::error::Error for Error { } /// A helper trait to access block headers and bodies. -pub trait ChainAccess: - HeaderBackend + - BlockBackend -{} +pub trait ChainAccess: HeaderBackend + BlockBackend {} -impl ChainAccess for T where +impl ChainAccess for T +where TBlock: Block, T: sp_blockchain::HeaderBackend + sc_client_api::BlockBackend, -{} +{ +} /// Blockchain inspector. pub struct Inspector = DebugPrinter> { @@ -120,22 +117,16 @@ pub struct Inspector = DebugPrint impl> Inspector { /// Create new instance of the inspector with default printer. 
- pub fn new( - chain: impl ChainAccess + 'static, - ) -> Self where TPrinter: Default { + pub fn new(chain: impl ChainAccess + 'static) -> Self + where + TPrinter: Default, + { Self::with_printer(chain, Default::default()) } /// Customize pretty-printing of the data. - pub fn with_printer( - chain: impl ChainAccess + 'static, - printer: TPrinter, - ) -> Self { - Inspector { - chain: Box::new(chain) as _, - printer, - _block: Default::default(), - } + pub fn with_printer(chain: impl ChainAccess + 'static, printer: TPrinter) -> Self { + Inspector { chain: Box::new(chain) as _, printer, _block: Default::default() } } /// Get a pretty-printed block. @@ -153,25 +144,27 @@ impl> Inspector fn get_block(&self, input: BlockAddressFor) -> Result { Ok(match input { - BlockAddress::Bytes(bytes) => { - TBlock::decode(&mut &*bytes)? - }, + BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?, BlockAddress::Number(number) => { let id = BlockId::number(number); let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? + let body = self + .chain + .block_body(&id)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = + self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, BlockAddress::Hash(hash) => { let id = BlockId::hash(hash); let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? + let body = self + .chain + .block_body(&id)? .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = + self.chain.header(id)?.ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, }) @@ -192,16 +185,14 @@ impl> Inspector let ext = match input { ExtrinsicAddress::Block(block, index) => { let block = self.get_block(block)?; - block.extrinsics() - .get(index) - .cloned() - .ok_or_else(|| Error::NotFound(format!( - "Could not find extrinsic {} in block {:?}", index, block - )))? + block.extrinsics().get(index).cloned().ok_or_else(|| { + Error::NotFound(format!( + "Could not find extrinsic {} in block {:?}", + index, block + )) + })? }, - ExtrinsicAddress::Bytes(bytes) => { - TBlock::Extrinsic::decode(&mut &*bytes)? - } + ExtrinsicAddress::Bytes(bytes) => TBlock::Extrinsic::decode(&mut &*bytes)?, }; Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) @@ -234,12 +225,12 @@ impl FromStr for BlockAddress { } // then assume it's bytes (hex-encoded) - sp_core::bytes::from_hex(s) - .map(Self::Bytes) - .map_err(|e| format!( + sp_core::bytes::from_hex(s).map(Self::Bytes).map_err(|e| { + format!( "Given string does not look like hash or number. It could not be parsed as bytes either: {}", e - )) + ) + }) } } @@ -263,11 +254,13 @@ impl FromStr for ExtrinsicAddres // split by a bunch of different characters let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); - let block = it.next() + let block = it + .next() .expect("First element of split iterator is never empty; qed") .parse()?; - let index = it.next() + let index = it + .next() .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? 
.parse() .map_err(|e| format!("Invalid index format: {}", e))?; @@ -290,10 +283,10 @@ mod tests { let b2 = BlockAddress::from_str("0"); let b3 = BlockAddress::from_str("0x0012345f"); - - assert_eq!(b0, Ok(BlockAddress::Hash( - "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() - ))); + assert_eq!( + b0, + Ok(BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap())) + ); assert_eq!(b1, Ok(BlockAddress::Number(1234))); assert_eq!(b2, Ok(BlockAddress::Number(0))); assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); @@ -310,20 +303,16 @@ mod tests { let b2 = ExtrinsicAddress::from_str("0 0"); let b3 = ExtrinsicAddress::from_str("0x0012345f"); - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); - assert_eq!(b0, Ok(ExtrinsicAddress::Block( - BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), - 5 - ))); - assert_eq!(b1, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(1234), - 0 - ))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(0), - 0 - ))); + assert_eq!( + b0, + Ok(ExtrinsicAddress::Block( + BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), + 5 + )) + ); + assert_eq!(b1, Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0))); + assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); } } diff --git a/substrate/bin/node/primitives/src/lib.rs b/substrate/bin/node/primitives/src/lib.rs index 9470adc399f9680d4e185e081980416d980a5df3..dade598c704d2c7cf3d215d54686ec4a2d88e6f3 100644 --- a/substrate/bin/node/primitives/src/lib.rs +++ b/substrate/bin/node/primitives/src/lib.rs @@ -18,11 +18,12 @@ //! Low-level types used throughout the Substrate code. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] use sp_runtime::{ - generic, traits::{Verify, BlakeTwo256, IdentifyAccount}, OpaqueExtrinsic, MultiSignature + generic, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, OpaqueExtrinsic, }; /// An index to a block. diff --git a/substrate/bin/node/rpc-client/src/main.rs b/substrate/bin/node/rpc-client/src/main.rs index ddd8a50ad36e44344466ccc20648f755e7cb7311..46e700a73911b8df1a1633cfa4d5eee9c91dd5fd 100644 --- a/substrate/bin/node/rpc-client/src/main.rs +++ b/substrate/bin/node/rpc-client/src/main.rs @@ -24,15 +24,9 @@ use futures::Future; use hyper::rt; +use jsonrpc_core_client::{transports::http, RpcError}; use node_primitives::Hash; -use sc_rpc::author::{ - AuthorClient, - hash::ExtrinsicOrHash, -}; -use jsonrpc_core_client::{ - transports::http, - RpcError, -}; +use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorClient}; fn main() { sp_tracing::try_init_simple(); @@ -41,9 +35,7 @@ fn main() { let uri = "http://localhost:9933"; http::connect(uri) - .and_then(|client: AuthorClient| { - remove_all_extrinsics(client) - }) + .and_then(|client: AuthorClient| remove_all_extrinsics(client)) .map_err(|e| { println!("Error: {:?}", e); }) @@ -58,11 +50,14 @@ fn main() { /// /// As the result of running the code the entire content of the transaction pool is going /// to be removed and the extrinsics are going to be temporarily banned. 
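Editor's note: the doc comment above describes a two-step flow: fetch everything pending via the `author_pendingExtrinsics` RPC, then feed the result back into `author_removeExtrinsic`. A hedged, synchronous sketch of the same flow against a hypothetical in-memory pool (the real `AuthorClient` is future-based):

```rust
// Hypothetical in-memory pool; only the flow is meant to match the doc comment.
struct MockPool {
    pending: Vec<Vec<u8>>, // opaque encoded extrinsics
}

impl MockPool {
    // Stands in for `pending_extrinsics` (RPC: author_pendingExtrinsics).
    fn pending_extrinsics(&self) -> Vec<Vec<u8>> {
        self.pending.clone()
    }

    // Stands in for `remove_extrinsic` (RPC: author_removeExtrinsic).
    fn remove_extrinsic(&mut self, txs: &[Vec<u8>]) -> usize {
        self.pending.retain(|tx| !txs.contains(tx));
        txs.len()
    }
}

fn main() {
    let mut pool = MockPool { pending: vec![vec![0x01], vec![0x02, 0x03]] };
    let pending = pool.pending_extrinsics();
    let removed = pool.remove_extrinsic(&pending);
    println!("Removed {} extrinsics, {} still pending", removed, pool.pending.len());
}
```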
-fn remove_all_extrinsics(client: AuthorClient) -> impl Future { - client.pending_extrinsics() +fn remove_all_extrinsics( + client: AuthorClient, +) -> impl Future { + client + .pending_extrinsics() .and_then(move |pending| { client.remove_extrinsic( - pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect() + pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect(), ) }) .map(|removed| { diff --git a/substrate/bin/node/rpc/src/lib.rs b/substrate/bin/node/rpc/src/lib.rs index ba17bf7d2c50d0c234128abccb1227b6f8cfb094..1b326eda6c19689ec976110fde59976ed4099719 100644 --- a/substrate/bin/node/rpc/src/lib.rs +++ b/substrate/bin/node/rpc/src/lib.rs @@ -32,24 +32,24 @@ use std::sync::Arc; -use sp_keystore::SyncCryptoStorePtr; -use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; +use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; +use sc_client_api::AuxStore; use sc_consensus_babe::{Config, Epoch}; use sc_consensus_babe_rpc::BabeRpcHandler; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ - SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream + FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, }; use sc_finality_grandpa_rpc::GrandpaRpcHandler; +use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; +use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; -use sc_rpc::SubscriptionTaskExecutor; -use sc_transaction_pool_api::TransactionPool; -use sc_client_api::AuxStore; +use sp_keystore::SyncCryptoStorePtr; /// Light client extra dependencies. pub struct LightDeps { @@ -111,9 +111,15 @@ pub type IoHandler = jsonrpc_core::IoHandler; /// Instantiate all Full RPC extensions. 
pub fn create_full( deps: FullDeps, -) -> jsonrpc_core::IoHandler where - C: ProvideRuntimeApi + HeaderBackend + AuxStore + - HeaderMetadata + Sync + Send + 'static, +) -> jsonrpc_core::IoHandler +where + C: ProvideRuntimeApi + + HeaderBackend + + AuxStore + + HeaderMetadata + + Sync + + Send + + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_mmr_rpc::MmrRuntimeApi::Hash>, @@ -121,31 +127,19 @@ pub fn create_full( C::Api: BabeApi, C::Api: BlockBuilder, P: TransactionPool + 'static, - SC: SelectChain +'static, + SC: SelectChain + 'static, B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use substrate_frame_rpc_system::{FullSystem, SystemApi}; use pallet_contracts_rpc::{Contracts, ContractsApi}; - use pallet_mmr_rpc::{MmrApi, Mmr}; + use pallet_mmr_rpc::{Mmr, MmrApi}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; let mut io = jsonrpc_core::IoHandler::default(); - let FullDeps { - client, - pool, - select_chain, - chain_spec, - deny_unsafe, - babe, - grandpa, - } = deps; + let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; - let BabeDeps { - keystore, - babe_config, - shared_epoch_changes, - } = babe; + let BabeDeps { keystore, babe_config, shared_epoch_changes } = babe; let GrandpaDeps { shared_voter_state, shared_authority_set, @@ -154,64 +148,45 @@ pub fn create_full( finality_provider, } = grandpa; - io.extend_with( - SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)) - ); + io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))); // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. 
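Editor's note: the comment above (synchronous calls freezing the browser in the light client) is about blocking a single-threaded WASM event loop. A generic illustration of the difference follows; the types and functions here are entirely hypothetical, not the sc-rpc API:

```rust
use std::future::Future;

// A synchronous handler holds up its caller until the answer is ready; in a
// browser-hosted light client that stalls the whole event loop.
fn handle_sync(query: &str) -> String {
    format!("result for {}", query) // imagine a slow backend lookup here
}

// An asynchronous handler returns immediately; the RPC executor polls the
// future, so other work keeps running while the lookup completes.
fn handle_async(query: String) -> impl Future<Output = String> {
    async move { format!("result for {}", query) }
}

fn main() {
    println!("{}", handle_sync("state_getStorage"));
    let _pending = handle_async("state_getStorage".into()); // awaited by an executor
}
```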
- io.extend_with( - ContractsApi::to_delegate(Contracts::new(client.clone())) - ); - io.extend_with( - MmrApi::to_delegate(Mmr::new(client.clone())) - ); - io.extend_with( - TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) - ); - io.extend_with( - sc_consensus_babe_rpc::BabeApi::to_delegate( - BabeRpcHandler::new( - client.clone(), - shared_epoch_changes.clone(), - keystore, - babe_config, - select_chain, - deny_unsafe, - ), - ) - ); - io.extend_with( - sc_finality_grandpa_rpc::GrandpaApi::to_delegate( - GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state, - justification_stream, - subscription_executor, - finality_provider, - ) - ) - ); - - io.extend_with( - sc_sync_state_rpc::SyncStateRpcApi::to_delegate( - sc_sync_state_rpc::SyncStateRpcHandler::new( - chain_spec, - client, - shared_authority_set, - shared_epoch_changes, - deny_unsafe, - ) - ) - ); + io.extend_with(ContractsApi::to_delegate(Contracts::new(client.clone()))); + io.extend_with(MmrApi::to_delegate(Mmr::new(client.clone()))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone()))); + io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate(BabeRpcHandler::new( + client.clone(), + shared_epoch_changes.clone(), + keystore, + babe_config, + select_chain, + deny_unsafe, + ))); + io.extend_with(sc_finality_grandpa_rpc::GrandpaApi::to_delegate(GrandpaRpcHandler::new( + shared_authority_set.clone(), + shared_voter_state, + justification_stream, + subscription_executor, + finality_provider, + ))); + + io.extend_with(sc_sync_state_rpc::SyncStateRpcApi::to_delegate( + sc_sync_state_rpc::SyncStateRpcHandler::new( + chain_spec, + client, + shared_authority_set, + shared_epoch_changes, + deny_unsafe, + ), + )); io } /// Instantiate all Light RPC extensions. -pub fn create_light( - deps: LightDeps, -) -> jsonrpc_core::IoHandler where +pub fn create_light(deps: LightDeps) -> jsonrpc_core::IoHandler +where C: sp_blockchain::HeaderBackend, C: Send + Sync + 'static, F: sc_client_api::light::Fetcher + 'static, @@ -220,16 +195,14 @@ pub fn create_light( { use substrate_frame_rpc_system::{LightSystem, SystemApi}; - let LightDeps { + let LightDeps { client, pool, remote_blockchain, fetcher } = deps; + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::::to_delegate(LightSystem::new( client, - pool, remote_blockchain, - fetcher - } = deps; - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) - ); + fetcher, + pool, + ))); io } diff --git a/substrate/bin/node/runtime/src/constants.rs b/substrate/bin/node/runtime/src/constants.rs index 2f6ad002a92834b51ffd404061e9568a7d35deea..7533025a70b00775e9d67d46566e119db3fede43 100644 --- a/substrate/bin/node/runtime/src/constants.rs +++ b/substrate/bin/node/runtime/src/constants.rs @@ -22,7 +22,7 @@ pub mod currency { use node_primitives::Balance; pub const MILLICENTS: Balance = 1_000_000_000; - pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. + pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. pub const DOLLARS: Balance = 100 * CENTS; pub const fn deposit(items: u32, bytes: u32) -> Balance { @@ -32,7 +32,7 @@ pub mod currency { /// Time. 
pub mod time { - use node_primitives::{Moment, BlockNumber}; + use node_primitives::{BlockNumber, Moment}; /// Since BABE is probabilistic this is the average expected block time that /// we are targeting. Blocks will be produced at a minimum duration defined diff --git a/substrate/bin/node/runtime/src/impls.rs b/substrate/bin/node/runtime/src/impls.rs index d3d0541b6ec0e9e3f6cd6e909a5789890dfc2d65..e315a45e698ce5b66767611646623e5f1c605972 100644 --- a/substrate/bin/node/runtime/src/impls.rs +++ b/substrate/bin/node/runtime/src/impls.rs @@ -17,8 +17,8 @@ //! Some configurable implementations as associated type for the substrate runtime. -use frame_support::traits::{OnUnbalanced, Currency}; -use crate::{Balances, Authorship, NegativeImbalance}; +use crate::{Authorship, Balances, NegativeImbalance}; +use frame_support::traits::{Currency, OnUnbalanced}; pub struct Author; impl OnUnbalanced for Author { @@ -29,19 +29,24 @@ impl OnUnbalanced for Author { #[cfg(test)] mod multiplier_tests { - use sp_runtime::{assert_eq_error_rate, FixedPointNumber, traits::{Convert, One, Zero}}; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; + use sp_runtime::{ + assert_eq_error_rate, + traits::{Convert, One, Zero}, + FixedPointNumber, + }; use crate::{ constants::{currency::*, time::*}, - TransactionPayment, Runtime, TargetBlockFullness, - AdjustmentVariable, System, MinimumMultiplier, - RuntimeBlockWeights as BlockWeights, + AdjustmentVariable, MinimumMultiplier, Runtime, RuntimeBlockWeights as BlockWeights, + System, TargetBlockFullness, TransactionPayment, }; - use frame_support::weights::{Weight, WeightToFeePolynomial, DispatchClass}; + use frame_support::weights::{DispatchClass, Weight, WeightToFeePolynomial}; fn max_normal() -> Weight { - BlockWeights::get().get(DispatchClass::Normal).max_total + BlockWeights::get() + .get(DispatchClass::Normal) + .max_total .unwrap_or_else(|| BlockWeights::get().max_block) } @@ -64,7 +69,7 @@ mod multiplier_tests { } // update based on reference impl. - fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier { + fn truth_value_update(block_weight: Weight, previous: Multiplier) -> Multiplier { let accuracy = Multiplier::accuracy() as f64; let previous_float = previous.into_inner() as f64 / accuracy; // bump if it is zero. @@ -81,15 +86,20 @@ mod multiplier_tests { // Current saturation in terms of weight let s = block_weight; - let t1 = v * (s/m - ss/m); - let t2 = v.powi(2) * (s/m - ss/m).powi(2) / 2.0; + let t1 = v * (s / m - ss / m); + let t2 = v.powi(2) * (s / m - ss / m).powi(2) / 2.0; let next_float = previous_float * (1.0 + t1 + t2); Multiplier::from_float(next_float) } - fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { - let mut t: sp_io::TestExternalities = - frame_system::GenesisConfig::default().build_storage::().unwrap().into(); + fn run_with_system_weight(w: Weight, assertions: F) + where + F: Fn() -> (), + { + let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into(); t.execute_with(|| { System::set_block_consumed_resources(w, 0); assertions() @@ -157,7 +167,9 @@ mod multiplier_tests { loop { let next = runtime_multiplier_update(fm); fm = next; - if fm == min_multiplier() { break; } + if fm == min_multiplier() { + break + } iterations += 1; } assert!(iterations > 533_333); @@ -198,7 +210,9 @@ mod multiplier_tests { loop { let next = runtime_multiplier_update(fm); // if no change, panic. This should never happen in this case. 
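Editor's note: `truth_value_update` above is the floating-point reference for the runtime's `TargetedFeeAdjustment`: with `v` the adjustment variable, `s` the consumed block weight, `ss` the target and `m` the maximum, it computes `next = prev * (1 + v*(s - ss)/m + v^2*((s - ss)/m)^2 / 2)`. A one-step, self-contained restatement with illustrative inputs (not the runtime's configured values):

```rust
// Mirrors the reference update in `truth_value_update`; all inputs here are
// illustrative, not the runtime's real parameters.
fn multiplier_step(prev: f64, block_weight: f64, max: f64, target: f64, v: f64) -> f64 {
    let (s, ss, m) = (block_weight, target, max);
    let t1 = v * (s / m - ss / m); // first-order term
    let t2 = v.powi(2) * (s / m - ss / m).powi(2) / 2.0; // second-order term
    prev * (1.0 + t1 + t2)
}

fn main() {
    // A block well above the 25% target nudges the multiplier up...
    println!("{:.9}", multiplier_step(1.0, 0.5, 1.0, 0.25, 0.00004));
    // ...and an empty block lets it decay below 1.
    println!("{:.9}", multiplier_step(1.0, 0.0, 1.0, 0.25, 0.00004));
}
```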
- if fm == next { panic!("The fee should ever increase"); } + if fm == next { + panic!("The fee should always increase"); + } fm = next; iterations += 1; let fee = @@ -225,7 +239,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() / 4 , fm), + truth_value_update(target() / 4, fm), Multiplier::from_inner(100), ); @@ -237,12 +251,11 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() / 2 , fm), + truth_value_update(target() / 2, fm), Multiplier::from_inner(100), ); // Light block. Multiplier is reduced a little. assert!(next < fm); - }); run_with_system_weight(target(), || { let next = runtime_multiplier_update(fm); @@ -259,7 +272,7 @@ mod multiplier_tests { let next = runtime_multiplier_update(fm); assert_eq_error_rate!( next, - truth_value_update(target() * 2 , fm), + truth_value_update(target() * 2, fm), Multiplier::from_inner(100), ); @@ -326,28 +339,24 @@ mod multiplier_tests { BlockWeights::get().max_block, Weight::max_value() / 2, Weight::max_value(), - ].into_iter().for_each(|i| { + ] + .into_iter() + .for_each(|i| { run_with_system_weight(i, || { let next = runtime_multiplier_update(Multiplier::one()); let truth = truth_value_update(i, Multiplier::one()); - assert_eq_error_rate!( - truth, - next, - Multiplier::from_inner(50_000_000) - ); + assert_eq_error_rate!(truth, next, Multiplier::from_inner(50_000_000)); }); }); // Some values that are all above the target and will cause an increase. let t = target(); - vec![t + 100, t * 2, t * 4] - .into_iter() - .for_each(|i| { - run_with_system_weight(i, || { - let fm = runtime_multiplier_update(max_fm); - // won't grow. The convert saturates everything. - assert_eq!(fm, max_fm); - }) - }); + vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { + run_with_system_weight(i, || { + let fm = runtime_multiplier_update(max_fm); + // won't grow. The convert saturates everything. + assert_eq!(fm, max_fm); + }) + }); } } diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 4c8f1a8298704b6e2874ef147274ddd48e63f219..82e3a9f7e084eacb67dd785b6cb0923925465ac4 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -22,67 +22,67 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"] - -use sp_std::prelude::*; +use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ - construct_runtime, parameter_types, RuntimeDebug, + construct_runtime, parameter_types, + traits::{ + AllowAll, Currency, DenyAll, Imbalance, InstanceFilter, KeyOwnerProofSystem, + LockIdentifier, OnUnbalanced, U128CurrencyToVote, + }, weights::{ - Weight, IdentityFee, constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - DispatchClass, - }, - traits::{ - Currency, Imbalance, KeyOwnerProofSystem, OnUnbalanced, LockIdentifier, - U128CurrencyToVote, AllowAll, DenyAll, + DispatchClass, IdentityFee, Weight, }, + PalletId, RuntimeDebug, }; use frame_system::{ - EnsureRoot, EnsureOneOf, - limits::{BlockWeights, BlockLength} + limits::{BlockLength, BlockWeights}, + EnsureOneOf, EnsureRoot, +}; +pub use node_primitives::{AccountId, Signature}; +use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; +use pallet_contracts::weights::WeightInfo; +use pallet_election_provider_multi_phase::FallbackStrategy; +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; -use frame_support::{traits::InstanceFilter, PalletId}; -use codec::{Encode, Decode, MaxEncodedLen}; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_session::historical as pallet_session_historical; +pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use sp_api::impl_runtime_apis; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_core::{ crypto::KeyTypeId, u32_trait::{_1, _2, _3, _4, _5}, OpaqueMetadata, }; -pub use node_primitives::{AccountId, Signature}; -use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; -use sp_api::impl_runtime_apis; +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - Permill, Perbill, Perquintill, Percent, ApplyExtrinsicResult, impl_opaque_keys, generic, - create_runtime_str, FixedPointNumber, -}; -use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; -use sp_runtime::traits::{ - self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, ConvertInto, OpaqueKeys, - NumberFor, + create_runtime_str, + curve::PiecewiseLinear, + generic, impl_opaque_keys, + traits::{ + self, BlakeTwo256, Block as BlockT, ConvertInto, NumberFor, OpaqueKeys, + SaturatedConversion, StaticLookup, + }, + transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, FixedPointNumber, Perbill, Percent, Permill, Perquintill, }; -use sp_version::RuntimeVersion; +use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use pallet_grandpa::fg_primitives; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; -pub use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment, CurrencyAdapter}; -use pallet_session::{historical as pallet_session_historical}; -use sp_inherents::{InherentData, CheckInherentsResult}; +use sp_version::RuntimeVersion; use static_assertions::const_assert; -use 
pallet_contracts::weights::WeightInfo; -use pallet_election_provider_multi_phase::FallbackStrategy; #[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; +pub use frame_system::Call as SystemCall; #[cfg(any(feature = "std", test))] pub use pallet_balances::Call as BalancesCall; #[cfg(any(feature = "std", test))] -pub use frame_system::Call as SystemCall; -#[cfg(any(feature = "std", test))] pub use pallet_staking::StakerStatus; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; @@ -90,7 +90,7 @@ use impls::Author; /// Constant values used within the runtime. pub mod constants; -use constants::{time::*, currency::*}; +use constants::{currency::*, time::*}; use sp_runtime::generic::Era; // Make the WASM binary available. @@ -100,9 +100,11 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. This means the client is \ + WASM_BINARY.expect( + "Development wasm binary is not available. This means the client is \ built with `SKIP_WASM_BUILD` flag and it is only usable for \ - production chains. Please rebuild with the flag disabled.") + production chains. Please rebuild with the flag disabled.", + ) } /// Runtime version. @@ -125,23 +127,20 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = sp_consensus_babe::BabeEpochConfiguration { c: PRIMARY_PROBABILITY, - allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots, }; /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } type NegativeImbalance = >::NegativeImbalance; pub struct DealWithFees; impl OnUnbalanced for DealWithFees { - fn on_unbalanceds(mut fees_then_tips: impl Iterator) { + fn on_unbalanceds(mut fees_then_tips: impl Iterator) { if let Some(fees) = fees_then_tips.next() { // for fees, 80% to treasury, 20% to author let mut split = fees.ration(80, 20); @@ -256,14 +255,20 @@ parameter_types! { } /// The type used to represent the kinds of proxying allowed. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen)] +#[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen, +)] pub enum ProxyType { Any, NonTransfer, Governance, Staking, } -impl Default for ProxyType { fn default() -> Self { Self::Any } } +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { @@ -271,19 +276,16 @@ impl InstanceFilter for ProxyType { ProxyType::NonTransfer => !matches!( c, Call::Balances(..) | - Call::Assets(..) | - Call::Uniques(..) | - Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | - Call::Indices(pallet_indices::Call::transfer(..)) + Call::Assets(..) | Call::Uniques(..) 
| + Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | + Call::Indices(pallet_indices::Call::transfer(..)) ), ProxyType::Governance => matches!( c, Call::Democracy(..) | - Call::Council(..) | - Call::Society(..) | - Call::TechnicalCommittee(..) | - Call::Elections(..) | - Call::Treasury(..) + Call::Council(..) | Call::Society(..) | + Call::TechnicalCommittee(..) | + Call::Elections(..) | Call::Treasury(..) ), ProxyType::Staking => matches!(c, Call::Staking(..)), } @@ -500,15 +502,16 @@ impl pallet_staking::Config for Runtime { type SlashCancelOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>, >; type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionProvider = ElectionProviderMultiPhase; - type GenesisElectionProvider = - onchain::OnChainSequentialPhragmen>; + type GenesisElectionProvider = onchain::OnChainSequentialPhragmen< + pallet_election_provider_multi_phase::OnChainConfig, + >; type WeightInfo = pallet_staking::weights::SubstrateWeight; } @@ -618,20 +621,26 @@ impl pallet_democracy::Config for Runtime { type VotingPeriod = VotingPeriod; type MinimumDeposit = MinimumDeposit; /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + type ExternalOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; /// A super-majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; + type ExternalMajorityOrigin = + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; /// A unanimous council can have the next scheduled referendum be a straight default-carries /// (NTB) vote. - type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; + type ExternalDefaultOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type FastTrackOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; + type InstantOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; type InstantAllowed = InstantAllowed; type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; + type CancellationOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; // To cancel a proposal before it has been passed, the technical committee must be unanimous or // Root must agree. 
type CancelProposalOrigin = EnsureOneOf< @@ -728,7 +737,7 @@ impl pallet_collective::Config for Runtime { type EnsureRootOrHalfCouncil = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; impl pallet_membership::Config for Runtime { type Event = Event; @@ -768,12 +777,12 @@ impl pallet_treasury::Config for Runtime { type ApproveOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>, >; type RejectOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; type Event = Event; type OnSlash = (); @@ -876,8 +885,8 @@ parameter_types! { } impl frame_system::offchain::CreateSignedTransaction for Runtime - where - Call: From, +where + Call: From, { fn create_transaction>( call: Call, @@ -887,10 +896,8 @@ impl frame_system::offchain::CreateSignedTransaction for R ) -> Option<(Call, ::SignaturePayload)> { let tip = 0; // take the biggest period possible. - let period = BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; + let period = + BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let current_block = System::block_number() .saturated_into::() // The `System::block_number` is initialized with `n+1`, @@ -911,10 +918,7 @@ impl frame_system::offchain::CreateSignedTransaction for R log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; - let signature = raw_payload - .using_encoded(|payload| { - C::sign(payload, public) - })?; + let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); let (call, extra, _) = raw_payload.deconstruct(); Some((call, (address, signature.into(), extra))) @@ -926,7 +930,8 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime +where Call: From, { type Extrinsic = UncheckedExtrinsic; @@ -965,8 +970,11 @@ impl pallet_grandpa::Config for Runtime { GrandpaId, )>>::IdentificationTuple; - type HandleEquivocation = - pallet_grandpa::EquivocationHandler; + type HandleEquivocation = pallet_grandpa::EquivocationHandler< + Self::KeyOwnerIdentification, + Offences, + ReportLongevity, + >; type WeightInfo = (); } @@ -1036,7 +1044,8 @@ impl pallet_society::Config for Runtime { type MembershipChanged = (); type RotationPeriod = RotationPeriod; type MaxLockDuration = MaxLockDuration; - type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type FounderSetOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type SuspensionJudgementOrigin = pallet_society::EnsureFounder; type MaxCandidateIntake = MaxCandidateIntake; type ChallengePeriod = ChallengePeriod; @@ -1261,11 +1270,7 @@ mod mmr { use super::Runtime; pub use pallet_mmr::primitives::*; - pub type Leaf = < - ::LeafData - as - LeafDataProvider - >::LeafData; + pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; pub type Hash = ::Hash; pub type Hashing = 
::Hashing; } @@ -1613,9 +1618,11 @@ mod tests { #[test] fn validate_transaction_submitter_bounds() { - fn is_submit_signed_transaction() where + fn is_submit_signed_transaction() + where T: CreateSignedTransaction, - {} + { + } is_submit_signed_transaction::(); } diff --git a/substrate/bin/node/test-runner-example/src/lib.rs b/substrate/bin/node/test-runner-example/src/lib.rs index 513c8a7d8b5c821d886069fa8a0945cfe9438116..f0b306db6b0c15d24e4457a8cbbea0e17dcbf027 100644 --- a/substrate/bin/node/test-runner-example/src/lib.rs +++ b/substrate/bin/node/test-runner-example/src/lib.rs @@ -19,12 +19,12 @@ //! Basic example of end to end runtime tests. -use test_runner::{ChainInfo, SignatureVerificationOverride}; use grandpa::GrandpaBlockImport; -use sc_service::{TFullBackend, TFullClient}; use sc_consensus_babe::BabeBlockImport; use sc_consensus_manual_seal::consensus::babe::SlotTimestampProvider; +use sc_service::{TFullBackend, TFullClient}; use sp_runtime::generic::Era; +use test_runner::{ChainInfo, SignatureVerificationOverride}; type BlockImport = BabeBlockImport>; @@ -54,15 +54,20 @@ impl ChainInfo for NodeTemplateChainInfo { Self::SelectChain, >; type SignedExtras = node_runtime::SignedExtra; - type InherentDataProviders = (SlotTimestampProvider, sp_consensus_babe::inherents::InherentDataProvider); + type InherentDataProviders = + (SlotTimestampProvider, sp_consensus_babe::inherents::InherentDataProvider); - fn signed_extras(from: ::AccountId) -> Self::SignedExtras { + fn signed_extras( + from: ::AccountId, + ) -> Self::SignedExtras { ( frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), frame_system::CheckMortality::::from(Era::Immortal), - frame_system::CheckNonce::::from(frame_system::Pallet::::account_nonce(from)), + frame_system::CheckNonce::::from( + frame_system::Pallet::::account_nonce(from), + ), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(0), ) @@ -72,32 +77,43 @@ impl ChainInfo for NodeTemplateChainInfo { #[cfg(test)] mod tests { use super::*; - use test_runner::{Node, client_parts, ConfigOrChainSpec, build_runtime, task_executor}; - use sp_keyring::sr25519::Keyring::Alice; use node_cli::chain_spec::development_config; + use sp_keyring::sr25519::Keyring::Alice; use sp_runtime::{traits::IdentifyAccount, MultiSigner}; + use test_runner::{build_runtime, client_parts, task_executor, ConfigOrChainSpec, Node}; #[test] fn test_runner() { let mut tokio_runtime = build_runtime().unwrap(); let task_executor = task_executor(tokio_runtime.handle().clone()); - let (rpc, task_manager, client, pool, command_sink, backend) = - client_parts::( - ConfigOrChainSpec::ChainSpec(Box::new(development_config()), task_executor) - ).unwrap(); - let node = Node::::new(rpc, task_manager, client, pool, command_sink, backend); + let (rpc, task_manager, client, pool, command_sink, backend) = client_parts::< + NodeTemplateChainInfo, + >( + ConfigOrChainSpec::ChainSpec(Box::new(development_config()), task_executor), + ) + .unwrap(); + let node = Node::::new( + rpc, + task_manager, + client, + pool, + command_sink, + backend, + ); tokio_runtime.block_on(async { // seals blocks node.seal_blocks(1).await; // submit extrinsics let alice = MultiSigner::from(Alice.public()).into_account(); - let _hash = node.submit_extrinsic(frame_system::Call::remark((b"hello world").to_vec()), alice) + let _hash = node + .submit_extrinsic(frame_system::Call::remark((b"hello world").to_vec()), alice) .await 
.unwrap(); // look ma, I can read state. - let _events = node.with_state(|| frame_system::Pallet::::events()); + let _events = + node.with_state(|| frame_system::Pallet::::events()); // get access to the underlying client. let _client = node.client(); }) diff --git a/substrate/bin/node/testing/src/bench.rs b/substrate/bin/node/testing/src/bench.rs index edb99c617771a0678269de28c335dcefb3bd2115..ceca493874dcb22caf97668027fa84fcd6c4f179 100644 --- a/substrate/bin/node/testing/src/bench.rs +++ b/substrate/bin/node/testing/src/bench.rs @@ -22,44 +22,42 @@ //! can pregenerate a seed database and `clone` it for every iteration of your benchmarks //! or tests to get a consistent, smooth benchmark experience! -use std::{sync::Arc, path::{Path, PathBuf}, collections::BTreeMap}; +use std::{ + collections::BTreeMap, + path::{Path, PathBuf}, + sync::Arc, +}; +use crate::{ + client::{Backend, Client}, + keyring::*, +}; +use codec::{Decode, Encode}; +use futures::executor; use node_primitives::Block; -use crate::client::{Client, Backend}; -use crate::keyring::*; +use node_runtime::{ + constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, + Signature, SystemCall, UncheckedExtrinsic, +}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + BlockBackend, ExecutionStrategy, +}; use sc_client_db::PruningMode; use sc_executor::{NativeExecutor, WasmExecutionMethod}; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder; use sp_consensus::{ - BlockOrigin, BlockImport, BlockImportParams, - ForkChoiceStrategy, ImportResult, ImportedAux + BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, ImportResult, ImportedAux, }; +use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, ExecutionContext, Pair, Public}; +use sp_inherents::InherentData; use sp_runtime::{ generic::BlockId, + traits::{Block as BlockT, IdentifyAccount, Verify, Zero}, OpaqueExtrinsic, - traits::{Block as BlockT, Verify, Zero, IdentifyAccount}, -}; -use codec::{Decode, Encode}; -use node_runtime::{ - Call, - CheckedExtrinsic, - constants::currency::DOLLARS, - UncheckedExtrinsic, - MinimumPeriod, - SystemCall, - BalancesCall, - AccountId, - Signature, -}; -use sp_core::{ExecutionContext, blake2_256, traits::SpawnNamed, Pair, Public, sr25519, ed25519}; -use sp_api::ProvideRuntimeApi; -use sp_block_builder::BlockBuilder; -use sp_inherents::InherentData; -use sc_client_api::{ - ExecutionStrategy, BlockBackend, - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, }; -use sc_block_builder::BlockBuilderProvider; -use futures::executor; /// Keyring full of accounts for benching. /// @@ -92,19 +90,21 @@ impl BenchPair { /// /// Will panic if cache drop is impossible. pub fn drop_system_cache() { - #[cfg(target_os = "windows")] { + #[cfg(target_os = "windows")] + { log::warn!( target: "bench-logistics", "Clearing system cache on windows is not supported.
Benchmark might totally be wrong.", ); - return; + return } std::process::Command::new("sync") .output() .expect("Failed to execute system cache clear"); - #[cfg(target_os = "linux")] { + #[cfg(target_os = "linux")] + { log::trace!(target: "bench-logistics", "Clearing system cache..."); std::process::Command::new("echo") .args(&["3", ">", "/proc/sys/vm/drop_caches", "2>", "/dev/null"]) @@ -133,7 +133,8 @@ pub fn drop_system_cache() { log::trace!(target: "bench-logistics", "Clearing system cache done!"); } - #[cfg(target_os = "macos")] { + #[cfg(target_os = "macos")] + { log::trace!(target: "bench-logistics", "Clearing system cache..."); if let Err(err) = std::process::Command::new("purge").output() { log::error!("purge error {:?}: ", err); @@ -169,15 +170,10 @@ impl Clone for BenchDb { ); let seed_db_files = std::fs::read_dir(seed_dir) .expect("failed to list file in seed dir") - .map(|f_result| - f_result.expect("failed to read file in seed db") - .path() - ).collect::>(); - fs_extra::copy_items( - &seed_db_files, - dir.path(), - &fs_extra::dir::CopyOptions::new(), - ).expect("Copy of seed database is ok"); + .map(|f_result| f_result.expect("failed to read file in seed db").path()) + .collect::>(); + fs_extra::copy_items(&seed_db_files, dir.path(), &fs_extra::dir::CopyOptions::new()) + .expect("Copy of seed database is ok"); // We clear system cache after db clone but before any warmups. // This populates system cache with some data unrelated to actual @@ -204,10 +200,7 @@ pub enum BlockType { impl BlockType { /// Create block content description with specified number of transactions. pub fn to_content(self, size: Option) -> BlockContent { - BlockContent { - block_type: self, - size, - } + BlockContent { block_type: self, size } } } @@ -230,13 +223,8 @@ pub enum DatabaseType { impl DatabaseType { fn into_settings(self, path: PathBuf) -> sc_client_db::DatabaseSettingsSrc { match self { - Self::RocksDb => sc_client_db::DatabaseSettingsSrc::RocksDb { - path, - cache_size: 512, - }, - Self::ParityDb => sc_client_db::DatabaseSettingsSrc::ParityDb { - path, - } + Self::RocksDb => sc_client_db::DatabaseSettingsSrc::RocksDb { path, cache_size: 512 }, + Self::ParityDb => sc_client_db::DatabaseSettingsSrc::ParityDb { path }, } } } @@ -251,10 +239,7 @@ pub struct TaskExecutor { impl TaskExecutor { fn new() -> Self { - Self { - pool: executor::ThreadPool::new() - .expect("Failed to create task executor") - } + Self { pool: executor::ThreadPool::new().expect("Failed to create task executor") } } } @@ -279,21 +264,17 @@ pub struct BlockContentIterator<'a> { impl<'a> BlockContentIterator<'a> { fn new(content: BlockContent, keyring: &'a BenchKeyring, client: &Client) -> Self { - let runtime_version = client.runtime_version_at(&BlockId::number(0)) + let runtime_version = client + .runtime_version_at(&BlockId::number(0)) .expect("There should be runtime version at 0"); - let genesis_hash = client.block_hash(Zero::zero()) + let genesis_hash = client + .block_hash(Zero::zero()) .expect("Database error?") .expect("Genesis block always exists; qed") .into(); - BlockContentIterator { - iteration: 0, - content, - keyring, - runtime_version, - genesis_hash, - } + BlockContentIterator { iteration: 0, content, keyring, runtime_version, genesis_hash } } } @@ -302,41 +283,36 @@ impl<'a> Iterator for BlockContentIterator<'a> { fn next(&mut self) -> Option { if self.content.size.map(|size| size <= self.iteration).unwrap_or(false) { - return None; + return None } let sender = self.keyring.at(self.iteration); - let 
receiver = get_account_id_from_seed::( - &format!("random-user//{}", self.iteration) - ); + let receiver = get_account_id_from_seed::(&format!( + "random-user//{}", + self.iteration + )); let signed = self.keyring.sign( CheckedExtrinsic { - signed: Some((sender, signed_extra(0, node_runtime::ExistentialDeposit::get() + 1))), + signed: Some(( + sender, + signed_extra(0, node_runtime::ExistentialDeposit::get() + 1), + )), function: match self.content.block_type { - BlockType::RandomTransfersKeepAlive => { - Call::Balances( - BalancesCall::transfer_keep_alive( - sp_runtime::MultiAddress::Id(receiver), - node_runtime::ExistentialDeposit::get() + 1, - ) - ) - }, + BlockType::RandomTransfersKeepAlive => + Call::Balances(BalancesCall::transfer_keep_alive( + sp_runtime::MultiAddress::Id(receiver), + node_runtime::ExistentialDeposit::get() + 1, + )), BlockType::RandomTransfersReaping => { - Call::Balances( - BalancesCall::transfer( - sp_runtime::MultiAddress::Id(receiver), - // Transfer so that ending balance would be 1 less than existential deposit - // so that we kill the sender account. - 100*DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), - ) - ) - }, - BlockType::Noop => { - Call::System( - SystemCall::remark(Vec::new()) - ) + Call::Balances(BalancesCall::transfer( + sp_runtime::MultiAddress::Id(receiver), + // Transfer so that ending balance would be 1 less than existential deposit + // so that we kill the sender account. + 100 * DOLLARS - (node_runtime::ExistentialDeposit::get() - 1), + )) }, + BlockType::Noop => Call::System(SystemCall::remark(Vec::new())), }, }, self.runtime_version.spec_version, @@ -346,8 +322,7 @@ impl<'a> Iterator for BlockContentIterator<'a> { let encoded = Encode::encode(&signed); - let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) - .expect("Failed to decode opaque"); + let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]).expect("Failed to decode opaque"); self.iteration += 1; @@ -373,12 +348,8 @@ impl BenchDb { "Created seed db at {}", dir.path().to_string_lossy(), ); - let (_client, _backend, _task_executor) = Self::bench_client( - database_type, - dir.path(), - Profile::Native, - &keyring, - ); + let (_client, _backend, _task_executor) = + Self::bench_client(database_type, dir.path(), Profile::Native, &keyring); let directory_guard = Guard(dir); BenchDb { keyring, directory_guard, database_type } @@ -408,7 +379,7 @@ impl BenchDb { keyring: &BenchKeyring, ) -> (Client, std::sync::Arc, TaskExecutor) { let db_config = sc_client_db::DatabaseSettings { - state_cache_size: 16*1024*1024, + state_cache_size: 16 * 1024 * 1024, state_cache_child_ratio: Some((0, 100)), state_pruning: PruningMode::ArchiveAll, source: database_type.into_settings(dir.into()), @@ -429,7 +400,8 @@ impl BenchDb { None, None, Default::default(), - ).expect("Should not fail"); + ) + .expect("Should not fail"); (client, backend, task_executor) } @@ -445,12 +417,14 @@ impl BenchDb { .put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) .expect("Put timestamp failed"); - client.runtime_api() + client + .runtime_api() .inherent_extrinsics_with_context( &BlockId::number(0), ExecutionContext::BlockConstruction, inherent_data, - ).expect("Get inherents failed") + ) + .expect("Get inherents failed") } /// Iterate over some block content with transaction signed using this database keyring. 
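The `RandomTransfersReaping` arm above sizes the transfer so that the sender ends up exactly one unit below the existential deposit and is therefore reaped. A minimal sketch of that arithmetic; the constants here are placeholders standing in for the runtime's real `DOLLARS` and `ExistentialDeposit` values:

```rust
// Placeholder constants purely for illustration; the real values come from node_runtime.
const DOLLARS: u128 = 100_000_000_000;
const EXISTENTIAL_DEPOSIT: u128 = DOLLARS;

fn main() {
    // Every benchmark account is endowed with 100 DOLLARS.
    let starting_balance = 100 * DOLLARS;
    // Mirrors `100 * DOLLARS - (ExistentialDeposit::get() - 1)` above.
    let transfer_amount = starting_balance - (EXISTENTIAL_DEPOSIT - 1);
    // The sender keeps one unit less than the existential deposit, so the
    // account is killed once the transfer settles.
    assert_eq!(starting_balance - transfer_amount, EXISTENTIAL_DEPOSIT - 1);
}
```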
@@ -474,9 +448,7 @@ impl BenchDb { pub fn generate_block(&mut self, content: BlockContent) -> Block { let client = self.client(); - let mut block = client - .new_block(Default::default()) - .expect("Block creation failed"); + let mut block = client.new_block(Default::default()).expect("Block creation failed"); for extrinsic in self.generate_inherents(&client) { block.push(extrinsic).expect("Push inherent failed"); @@ -486,14 +458,12 @@ impl BenchDb { for opaque in self.block_content(content, &client) { match block.push(opaque) { Err(sp_blockchain::Error::ApplyExtrinsicFailed( - sp_blockchain::ApplyExtrinsicFailed::Validity(e) - )) if e.exhausted_resources() => { - break; - }, + sp_blockchain::ApplyExtrinsicFailed::Validity(e), + )) if e.exhausted_resources() => break, Err(err) => panic!("Error pushing transaction: {:?}", err), Ok(_) => {}, } - }; + } let block = block.build().expect("Block build failed").block; @@ -514,12 +484,8 @@ impl BenchDb { /// Clone this database and create context for testing/benchmarking. pub fn create_context(&self, profile: Profile) -> BenchContext { let BenchDb { directory_guard, keyring, database_type } = self.clone(); - let (client, backend, task_executor) = Self::bench_client( - database_type, - directory_guard.path(), - profile, - &keyring - ); + let (client, backend, task_executor) = + Self::bench_client(database_type, directory_guard.path(), profile, &keyring); BenchContext { client: Arc::new(client), @@ -549,7 +515,8 @@ impl BenchKeyring { let seed = format!("//endowed-user/{}", n); let (account_id, pair) = match key_types { KeyTypes::Sr25519 => { - let pair = sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); + let pair = + sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); let account_id = AccountPublic::from(pair.public()).into_account(); (account_id, BenchPair::Sr25519(pair)) }, @@ -581,28 +548,34 @@ impl BenchKeyring { xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, - genesis_hash: [u8; 32] + genesis_hash: [u8; 32], ) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = ( + xt.function, + extra.clone(), + spec_version, + tx_version, + genesis_hash, + genesis_hash, + ); let key = self.accounts.get(&signed).expect("Account id not found in keyring"); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); UncheckedExtrinsic { signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, }, + None => UncheckedExtrinsic { signature: None, function: xt.function }, } } @@ -641,7 +614,7 @@ impl Profile { block_construction: ExecutionStrategy::NativeElseWasm, offchain_worker: ExecutionStrategy::NativeElseWasm, other: ExecutionStrategy::NativeElseWasm, - } + }, } } } @@ -676,7 +649,7 @@ fn get_from_seed(seed: &str) -> ::Public fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> + AccountPublic: From<::Public>, { AccountPublic::from(get_from_seed::(seed)).into_account() } @@ -684,24 +657,25 @@ where impl BenchContext { /// Import some block. 
pub fn import_block(&mut self, block: Block) { - let mut import_params = BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); + let mut import_params = + BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); import_params.body = Some(block.extrinsics().to_vec()); import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); assert_eq!(self.client.chain_info().best_number, 0); assert_eq!( - futures::executor::block_on(self.client.import_block(import_params, Default::default())) - .expect("Failed to import block"), - ImportResult::Imported( - ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - is_new_best: true, - } + futures::executor::block_on( + self.client.import_block(import_params, Default::default()) ) + .expect("Failed to import block"), + ImportResult::Imported(ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + is_new_best: true, + }) ); assert_eq!(self.client.chain_info().best_number, 1); diff --git a/substrate/bin/node/testing/src/client.rs b/substrate/bin/node/testing/src/client.rs index d53519950dc1e137f0e556a5ed93689bcddf8e2c..9538cd47d88a6e3ed029989ede36db328eb2c793 100644 --- a/substrate/bin/node/testing/src/client.rs +++ b/substrate/bin/node/testing/src/client.rs @@ -18,8 +18,8 @@ //! Utilities to build a `TestClient` for `node-runtime`. -use sp_runtime::BuildStorage; use sc_service::client; +use sp_runtime::BuildStorage; /// Re-export test-client utilities. pub use substrate_test_client::*; @@ -61,13 +61,15 @@ pub trait TestClientBuilderExt: Sized { fn build(self) -> Client; } -impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< - node_primitives::Block, - client::LocalCallExecutor, - Backend, - GenesisParameters, -> { - fn new() -> Self{ +impl TestClientBuilderExt + for substrate_test_client::TestClientBuilder< + node_primitives::Block, + client::LocalCallExecutor, + Backend, + GenesisParameters, + > +{ + fn new() -> Self { Self::default() } @@ -75,5 +77,3 @@ impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< self.build_with_native_executor(None).0 } } - - diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs index 3a6d51f1971ed65c1dcb0b8eda4f6ebe5571d00e..50c1e6f9d20beab50984d5ccdf2367b3e78831fd 100644 --- a/substrate/bin/node/testing/src/genesis.rs +++ b/substrate/bin/node/testing/src/genesis.rs @@ -19,14 +19,13 @@ //! Genesis Configuration. use crate::keyring::*; -use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use node_runtime::{ - GenesisConfig, BalancesConfig, SessionConfig, StakingConfig, SystemConfig, - GrandpaConfig, IndicesConfig, SocietyConfig, wasm_binary_unwrap, - AccountId, StakerStatus, BabeConfig, BABE_GENESIS_EPOCH_CONFIG, + constants::currency::*, wasm_binary_unwrap, AccountId, BabeConfig, BalancesConfig, + GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus, + StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG, }; -use node_runtime::constants::currency::*; use sp_core::ChangesTrieConfiguration; +use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. 
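The `config_endowed` hunks below fold the extra accounts into the default endowment set with a flat per-account balance. A standalone sketch of that fold, with `AccountId` stubbed as a `String` and a placeholder `DOLLARS` unit, neither of which is the real runtime type:

```rust
type AccountId = String; // stub purely for illustration
const DOLLARS: u128 = 100_000_000_000; // placeholder unit

fn endowed(extra: Vec<AccountId>) -> Vec<(AccountId, u128)> {
    let mut endowed =
        vec![("alice".to_string(), 111 * DOLLARS), ("bob".to_string(), 100 * DOLLARS)];
    // Mirrors `endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS)))`.
    endowed.extend(extra.into_iter().map(|who| (who, 100 * DOLLARS)));
    endowed
}

fn main() {
    assert_eq!(endowed(vec!["charlie".into()]).len(), 3);
}
```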
@@ -41,7 +40,6 @@ pub fn config_endowed( code: Option<&[u8]>, extra_endowed: Vec, ) -> GenesisConfig { - let mut endowed = vec![ (alice(), 111 * DOLLARS), (bob(), 100 * DOLLARS), @@ -51,59 +49,44 @@ pub fn config_endowed( (ferdie(), 100 * DOLLARS), ]; - endowed.extend( - extra_endowed.into_iter().map(|endowed| (endowed, 100*DOLLARS)) - ); + endowed.extend(extra_endowed.into_iter().map(|endowed| (endowed, 100 * DOLLARS))); GenesisConfig { system: SystemConfig { - changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }) } else { None }, + changes_trie_config: if support_changes_trie { + Some(ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2 }) + } else { + None + }, code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), }, - indices: IndicesConfig { - indices: vec![], - }, - balances: BalancesConfig { - balances: endowed, - }, + indices: IndicesConfig { indices: vec![] }, + balances: BalancesConfig { balances: endowed }, session: SessionConfig { keys: vec![ - (dave(), alice(), to_session_keys( - &Ed25519Keyring::Alice, - &Sr25519Keyring::Alice, - )), - (eve(), bob(), to_session_keys( - &Ed25519Keyring::Bob, - &Sr25519Keyring::Bob, - )), - (ferdie(), charlie(), to_session_keys( - &Ed25519Keyring::Charlie, - &Sr25519Keyring::Charlie, - )), - ] + (dave(), alice(), to_session_keys(&Ed25519Keyring::Alice, &Sr25519Keyring::Alice)), + (eve(), bob(), to_session_keys(&Ed25519Keyring::Bob, &Sr25519Keyring::Bob)), + ( + ferdie(), + charlie(), + to_session_keys(&Ed25519Keyring::Charlie, &Sr25519Keyring::Charlie), + ), + ], }, staking: StakingConfig { stakers: vec![ (dave(), alice(), 111 * DOLLARS, StakerStatus::Validator), (eve(), bob(), 100 * DOLLARS, StakerStatus::Validator), - (ferdie(), charlie(), 100 * DOLLARS, StakerStatus::Validator) + (ferdie(), charlie(), 100 * DOLLARS, StakerStatus::Validator), ], validator_count: 3, minimum_validator_count: 0, slash_reward_fraction: Perbill::from_percent(10), invulnerables: vec![alice(), bob(), charlie()], - .. Default::default() - }, - babe: BabeConfig { - authorities: vec![], - epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), - }, - grandpa: GrandpaConfig { - authorities: vec![], + ..Default::default() }, + babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG) }, + grandpa: GrandpaConfig { authorities: vec![] }, im_online: Default::default(), authority_discovery: Default::default(), democracy: Default::default(), @@ -113,11 +96,7 @@ pub fn config_endowed( elections: Default::default(), sudo: Default::default(), treasury: Default::default(), - society: SocietyConfig { - members: vec![alice(), bob()], - pot: 0, - max_members: 999, - }, + society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999 }, vesting: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), diff --git a/substrate/bin/node/testing/src/keyring.rs b/substrate/bin/node/testing/src/keyring.rs index da61040206ea4f8a41bc963226bb8a70eb1acdea..4e2d88b4bba335a57cfb7e957e36bc098962e1fe 100644 --- a/substrate/bin/node/testing/src/keyring.rs +++ b/substrate/bin/node/testing/src/keyring.rs @@ -18,11 +18,11 @@ //! Test accounts. 
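The `sign` helper reformatted below never signs more than 256 bytes directly: a longer encoding is first hashed with `blake2_256`, and the signature is made over the 32-byte digest. A sketch of that selection rule, with the hash stubbed out for illustration:

```rust
// Selects what actually gets signed; mirrors the `using_encoded` closure below.
fn digest_to_sign(encoded_payload: &[u8]) -> Vec<u8> {
    if encoded_payload.len() > 256 {
        blake2_256_stub(encoded_payload).to_vec() // sign the 32-byte hash
    } else {
        encoded_payload.to_vec() // sign the raw bytes
    }
}

// Illustrative stand-in for sp_io::hashing::blake2_256; the real function
// produces an actual Blake2b-256 digest.
fn blake2_256_stub(data: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, b) in data.iter().enumerate() {
        out[i % 32] ^= b;
    }
    out
}

fn main() {
    assert_eq!(digest_to_sign(&[1u8; 10]).len(), 10);
    assert_eq!(digest_to_sign(&[1u8; 300]).len(), 32);
}
```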
-use sp_keyring::{AccountKeyring, Sr25519Keyring, Ed25519Keyring}; +use codec::Encode; use node_primitives::{AccountId, Balance, Index}; -use node_runtime::{CheckedExtrinsic, UncheckedExtrinsic, SessionKeys, SignedExtra}; +use node_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; use sp_runtime::generic::Era; -use codec::Encode; /// Alice's account id. pub fn alice() -> AccountId { @@ -81,26 +81,31 @@ pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { } /// Sign given `CheckedExtrinsic`. -pub fn sign(xt: CheckedExtrinsic, spec_version: u32, tx_version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { +pub fn sign( + xt: CheckedExtrinsic, + spec_version: u32, + tx_version: u32, + genesis_hash: [u8; 32], +) -> UncheckedExtrinsic { match xt.signed { Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); + let payload = + (xt.function, extra.clone(), spec_version, tx_version, genesis_hash, genesis_hash); let key = AccountKeyring::from_account_id(&signed).unwrap(); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); UncheckedExtrinsic { signature: Some((sp_runtime::MultiAddress::Id(signed), signature, extra)), function: payload.0, } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, }, + None => UncheckedExtrinsic { signature: None, function: xt.function }, } } diff --git a/substrate/bin/node/testing/src/lib.rs b/substrate/bin/node/testing/src/lib.rs index c5792bccee80da8f787424517653d484fe2f2ce6..a3392bcb29d5d74a99aba53918aecb5c5e166be6 100644 --- a/substrate/bin/node/testing/src/lib.rs +++ b/substrate/bin/node/testing/src/lib.rs @@ -20,7 +20,7 @@ #![warn(missing_docs)] +pub mod bench; pub mod client; pub mod genesis; pub mod keyring; -pub mod bench; diff --git a/substrate/bin/utils/chain-spec-builder/src/main.rs b/substrate/bin/utils/chain-spec-builder/src/main.rs index a3f8eaa1f8547464230ebd5f762ecf92d7d8fc36..60d46dcfeee53c9f494ddc720a57bae7b36348c3 100644 --- a/substrate/bin/utils/chain-spec-builder/src/main.rs +++ b/substrate/bin/utils/chain-spec-builder/src/main.rs @@ -16,19 +16,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{fs, path::{Path, PathBuf}, sync::Arc}; +use std::{ + fs, + path::{Path, PathBuf}, + sync::Arc, +}; use ansi_term::Style; -use rand::{Rng, distributions::Alphanumeric, rngs::OsRng}; +use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use structopt::StructOpt; -use sc_keystore::LocalKeystore; use node_cli::chain_spec::{self, AccountId}; +use sc_keystore::LocalKeystore; use sp_core::{ - sr25519, crypto::{Public, Ss58Codec}, + sr25519, }; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. @@ -86,10 +90,8 @@ impl ChainSpecBuilder { /// Returns the path where the chain spec should be saved. fn chain_spec_path(&self) -> &Path { match self { - ChainSpecBuilder::New { chain_spec_path, .. 
} => - chain_spec_path.as_path(), - ChainSpecBuilder::Generate { chain_spec_path, .. } => - chain_spec_path.as_path(), + ChainSpecBuilder::New { chain_spec_path, .. } => chain_spec_path.as_path(), + ChainSpecBuilder::Generate { chain_spec_path, .. } => chain_spec_path.as_path(), } } } @@ -125,11 +127,15 @@ fn generate_chain_spec( .map_err(|err| format!("Failed to parse account address: {:?}", err)) }; - let nominator_accounts = - nominator_accounts.into_iter().map(parse_account).collect::, String>>()?; + let nominator_accounts = nominator_accounts + .into_iter() + .map(parse_account) + .collect::, String>>()?; - let endowed_accounts = - endowed_accounts.into_iter().map(parse_account).collect::, String>>()?; + let endowed_accounts = endowed_accounts + .into_iter() + .map(parse_account) + .collect::, String>>()?; let sudo_account = parse_account(sudo_account)?; @@ -137,7 +143,14 @@ fn generate_chain_spec( "Custom", "custom", sc_chain_spec::ChainType::Live, - move || genesis_constructor(&authority_seeds, &nominator_accounts, &endowed_accounts, &sudo_account), + move || { + genesis_constructor( + &authority_seeds, + &nominator_accounts, + &endowed_accounts, + &sudo_account, + ) + }, vec![], None, None, @@ -148,42 +161,26 @@ fn generate_chain_spec( chain_spec.as_json(false).map_err(|err| err) } -fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { +fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> { for (n, seed) in seeds.into_iter().enumerate() { - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open( - keystore_path.join(format!("auth-{}", n)), - None, - ).map_err(|err| err.to_string())?); + let keystore: SyncCryptoStorePtr = Arc::new( + LocalKeystore::open(keystore_path.join(format!("auth-{}", n)), None) + .map_err(|err| err.to_string())?, + ); let (_, _, grandpa, babe, im_online, authority_discovery) = chain_spec::authority_keys_from_seed(seed); let insert_key = |key_type, public| { - SyncCryptoStore::insert_unknown( - &*keystore, - key_type, - &format!("//{}", seed), - public, - ).map_err(|_| format!("Failed to insert key: {}", grandpa)) + SyncCryptoStore::insert_unknown(&*keystore, key_type, &format!("//{}", seed), public) + .map_err(|_| format!("Failed to insert key: {}", grandpa)) }; - insert_key( - sp_core::crypto::key_types::BABE, - babe.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; - insert_key( - sp_core::crypto::key_types::GRANDPA, - grandpa.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; - insert_key( - sp_core::crypto::key_types::IM_ONLINE, - im_online.as_slice(), - )?; + insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; insert_key( sp_core::crypto::key_types::AUTHORITY_DISCOVERY, @@ -206,10 +203,7 @@ fn print_seeds( println!("{}", header.paint("Authority seeds")); for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("auth-{}:", n)), - seed, - ); + println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed,); } println!("{}", header.paint("Nominator seeds")); @@ -223,10 +217,7 @@ fn print_seeds( if !endowed_seeds.is_empty() { println!("{}", header.paint("Endowed seeds")); for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("endowed-{}:", n)), - seed, - ); + println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed,); } println!(); @@ -260,10 +251,7 @@ fn main() -> 
Result<(), String> { print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store( - &authority_seeds, - &keystore_path, - )?; + generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; } let nominator_accounts = nominator_seeds @@ -284,7 +272,7 @@ fn main() -> Result<(), String> { chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) - } + }, ChainSpecBuilder::New { authority_seeds, nominator_accounts, @@ -294,12 +282,8 @@ fn main() -> Result<(), String> { } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), }; - let json = generate_chain_spec( - authority_seeds, - nominator_accounts, - endowed_accounts, - sudo_account, - )?; + let json = + generate_chain_spec(authority_seeds, nominator_accounts, endowed_accounts, sudo_account)?; fs::write(chain_spec_path, json).map_err(|err| err.to_string()) } diff --git a/substrate/bin/utils/subkey/src/lib.rs b/substrate/bin/utils/subkey/src/lib.rs index 5e9f04418a6b56a5c1be6a405385290d35bb7f1a..5052d1b104c2ca7045b1569be0dc973a2003292e 100644 --- a/substrate/bin/utils/subkey/src/lib.rs +++ b/substrate/bin/utils/subkey/src/lib.rs @@ -16,17 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use structopt::StructOpt; use sc_cli::{ - Error, VanityCmd, SignCmd, VerifyCmd, GenerateNodeKeyCmd, GenerateCmd, InspectKeyCmd, - InspectNodeKeyCmd + Error, GenerateCmd, GenerateNodeKeyCmd, InspectKeyCmd, InspectNodeKeyCmd, SignCmd, VanityCmd, + VerifyCmd, }; +use structopt::StructOpt; #[derive(Debug, StructOpt)] #[structopt( name = "subkey", author = "Parity Team ", - about = "Utility for generating and restoring with Substrate keys", + about = "Utility for generating and restoring with Substrate keys" )] pub enum Subkey { /// Generate a random node libp2p key, save it to file or print it to stdout diff --git a/substrate/client/allocator/src/error.rs b/substrate/client/allocator/src/error.rs index e880e8d0ae75dfbf7fb7c66a7ecc1da73ae5f594..2b2cc127dcfb39a336bfb6e2162e72caa234422c 100644 --- a/substrate/client/allocator/src/error.rs +++ b/substrate/client/allocator/src/error.rs @@ -26,5 +26,5 @@ pub enum Error { AllocatorOutOfSpace, /// Some other error occurred. #[error("Other: {0}")] - Other(&'static str) + Other(&'static str), } diff --git a/substrate/client/allocator/src/freeing_bump.rs b/substrate/client/allocator/src/freeing_bump.rs index 7f83576aedfa660fc5725e0ca983e903e2138ce7..105ef954ddf105eca372a5fc8e168a5c076c4955 100644 --- a/substrate/client/allocator/src/freeing_bump.rs +++ b/substrate/client/allocator/src/freeing_bump.rs @@ -68,8 +68,12 @@ //! sizes. use crate::Error; -use std::{mem, convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; use sp_wasm_interface::{Pointer, WordSize}; +use std::{ + convert::{TryFrom, TryInto}, + mem, + ops::{Index, IndexMut, Range}, +}; /// The minimal alignment guaranteed by this allocator. 
/// @@ -139,7 +143,7 @@ impl Order { fn from_size(size: u32) -> Result { let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size); - return Err(Error::RequestedAllocationTooLarge); + return Err(Error::RequestedAllocationTooLarge) } else if size < MIN_POSSIBLE_ALLOCATION { MIN_POSSIBLE_ALLOCATION } else { @@ -216,7 +220,6 @@ impl Link { /// ``` /// /// ## Occupied header -/// /// ```ignore /// 64 32 0 // +--------------+-------------------+ @@ -290,9 +293,7 @@ struct FreeLists { impl FreeLists { /// Creates the free empty lists. fn new() -> Self { - Self { - heads: [Link::Nil; N_ORDERS] - } + Self { heads: [Link::Nil; N_ORDERS] } } /// Replaces a given link for the specified order and returns the old one. @@ -397,15 +398,11 @@ impl FreeingBumpHeapAllocator { self.free_lists[order] = next_free; header_ptr - } + }, Link::Nil => { // Corresponding free list is empty. Allocate a new item. - Self::bump( - &mut self.bumper, - order.size() + HEADER_SIZE, - mem.size(), - )? - } + Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem.size())? + }, }; // Write the order in the occupied header. @@ -440,7 +437,11 @@ impl FreeingBumpHeapAllocator { /// /// - `mem` - a slice representing the linear memory on which this allocator operates. /// - `ptr` - pointer to the allocated chunk - pub fn deallocate(&mut self, mem: &mut M, ptr: Pointer) -> Result<(), Error> { + pub fn deallocate( + &mut self, + mem: &mut M, + ptr: Pointer, + ) -> Result<(), Error> { if self.poisoned { return Err(error("the allocator has been poisoned")) } @@ -480,8 +481,13 @@ impl FreeingBumpHeapAllocator { /// the operation would exhaust the heap. fn bump(bumper: &mut u32, size: u32, heap_end: u32) -> Result { if *bumper + size > heap_end { - log::error!(target: LOG_TARGET, "running out of space with current bumper {}, mem size {}", bumper, heap_end); - return Err(Error::AllocatorOutOfSpace); + log::error!( + target: LOG_TARGET, + "running out of space with current bumper {}, mem size {}", + bumper, + heap_end + ); + return Err(Error::AllocatorOutOfSpace) } let res = *bumper; diff --git a/substrate/client/allocator/src/lib.rs b/substrate/client/allocator/src/lib.rs index a82c7542199d4c52f3ff20ff9d4379dcf38be773..4493db3c7d14610c7feae106900ca6e2e33ef445 100644 --- a/substrate/client/allocator/src/lib.rs +++ b/substrate/client/allocator/src/lib.rs @@ -25,5 +25,5 @@ mod error; mod freeing_bump; -pub use freeing_bump::FreeingBumpHeapAllocator; pub use error::Error; +pub use freeing_bump::FreeingBumpHeapAllocator; diff --git a/substrate/client/api/src/backend.rs b/substrate/client/api/src/backend.rs index b09995f887c4aaff091cfc06b6b53299a3303bab..965e0151c3cbaf04ce6a88816574493193edfec3 100644 --- a/substrate/client/api/src/backend.rs +++ b/substrate/client/api/src/backend.rs @@ -18,30 +18,32 @@ //! 
Substrate Client data backend -use std::sync::Arc; -use std::collections::{HashMap, HashSet}; -use sp_core::ChangesTrieConfigurationRange; -use sp_core::offchain::OffchainStorage; -use sp_runtime::{generic::BlockId, Justification, Justifications, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; -use sp_state_machine::{ - ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, OffchainChangesCollection, IndexOperation, -}; -use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ - blockchain::{ - Backend as BlockchainBackend, well_known_cache_keys - }, + blockchain::{well_known_cache_keys, Backend as BlockchainBackend}, light::RemoteBlockchain, UsageInfo, }; +use parking_lot::RwLock; use sp_blockchain; use sp_consensus::BlockOrigin; -use parking_lot::RwLock; +use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, NumberFor}, + Justification, Justifications, Storage, +}; +use sp_state_machine::{ + ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, + ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, +}; +use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; -pub use sp_state_machine::Backend as StateBackend; pub use sp_consensus::ImportedState; +pub use sp_state_machine::Backend as StateBackend; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. @@ -90,16 +92,17 @@ pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>( insert: I, delete: D, ) -> sp_blockchain::Result<()> - where - Block: BlockT, - B: Backend, - I: IntoIterator, - D: IntoIterator, +where + Block: BlockT, + B: Backend, + I: IntoIterator, + D: IntoIterator, { operation.op.insert_aux( - insert.into_iter() + insert + .into_iter() .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - .chain(delete.into_iter().map(|k| (k.to_vec(), None))) + .chain(delete.into_iter().map(|k| (k.to_vec(), None))), ) } @@ -165,7 +168,11 @@ pub trait BlockImportOperation { /// Set genesis state. If `commit` is `false` the state is saved in memory, but is not written /// to the database. - fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result; + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result; /// Inject storage data into the database replacing any existing data. fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; @@ -182,7 +189,7 @@ pub trait BlockImportOperation { &mut self, _offchain_update: OffchainChangesCollection, ) -> sp_blockchain::Result<()> { - Ok(()) + Ok(()) } /// Inject changes trie data into the database. @@ -195,7 +202,8 @@ pub trait BlockImportOperation { /// /// Values are `None` if they should be deleted. fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)>; + where + I: IntoIterator, Option>)>; /// Mark a block as finalized. fn mark_finalized( @@ -209,16 +217,17 @@ pub trait BlockImportOperation { fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; /// Add a transaction index operation.
- fn update_transaction_index(&mut self, index: Vec) -> sp_blockchain::Result<()>; + fn update_transaction_index(&mut self, index: Vec) + -> sp_blockchain::Result<()>; } /// Interface for performing operations on the backend. pub trait LockImportRun> { /// Lock the import lock, and run operations inside. fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From; + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From; } /// Finalize Facilities @@ -270,9 +279,13 @@ pub trait AuxStore { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()>; + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()>; /// Query auxiliary data from key-value store. fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>>; @@ -287,16 +300,10 @@ pub struct KeyIterator<'a, State, Block> { _phantom: PhantomData, } -impl <'a, State, Block> KeyIterator<'a, State, Block> { +impl<'a, State, Block> KeyIterator<'a, State, Block> { /// Create a `KeyIterator` instance. pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { - Self { - state, - child_storage: None, - prefix, - current_key, - _phantom: PhantomData, - } + Self { state, child_storage: None, prefix, current_key, _phantom: PhantomData } } /// Create a `KeyIterator` instance for a child storage. @@ -306,17 +313,12 @@ impl <'a, State, Block> KeyIterator<'a, State, Block> { prefix: Option<&'a StorageKey>, current_key: Vec, ) -> Self { - Self { - state, - child_storage: Some(child_info), - prefix, - current_key, - _phantom: PhantomData, - } + Self { state, child_storage: Some(child_info), prefix, current_key, _phantom: PhantomData } } } -impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where +impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> +where Block: BlockT, State: StateBackend>, { @@ -327,11 +329,13 @@ impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where self.state.next_child_storage_key(child_info, &self.current_key) } else { self.state.next_storage_key(&self.current_key) - }.ok().flatten()?; + } + .ok() + .flatten()?; // this terminates the iterator the first time it fails. if let Some(prefix) = self.prefix { if !next_key.starts_with(&prefix.0[..]) { - return None; + return None } } self.current_key = next_key.clone(); @@ -342,19 +346,31 @@ impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where /// Provides access to storage primitives pub trait StorageProvider> { /// Given a `BlockId` and a key, return the value under the key in that block. - fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result>; + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key, return the hash of the value under the key in that block. - fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + fn storage_hash( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block.
fn storage_pairs( &self, id: &BlockId, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. @@ -362,7 +378,7 @@ pub trait StorageProvider> { &self, id: &BlockId, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. @@ -370,7 +386,7 @@ pub trait StorageProvider> { &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result>; /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. @@ -378,7 +394,7 @@ pub trait StorageProvider> { &self, id: &BlockId, child_info: &ChildInfo, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result>; /// Given a `BlockId` and a key `prefix` and a child storage key, @@ -388,7 +404,7 @@ pub trait StorageProvider> { id: &BlockId, child_info: ChildInfo, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result>; /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block. @@ -396,7 +412,7 @@ pub trait StorageProvider> { &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result>; /// Get longest range within [first; last] that is possible to use in `key_changes` @@ -418,7 +434,7 @@ pub trait StorageProvider> { first: NumberFor, last: BlockId, storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result, u32)>>; } @@ -511,20 +527,20 @@ pub trait Backend: AuxStore + Send + Sync { ) -> sp_blockchain::Result<(NumberFor, HashSet)>; /// Discard non-best, unfinalized leaf block. - fn remove_leaf_block( - &self, - hash: &Block::Hash, - ) -> sp_blockchain::Result<()>; + fn remove_leaf_block(&self, hash: &Block::Hash) -> sp_blockchain::Result<()>; /// Insert auxiliary data into key-value store. fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> - { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { AuxStore::insert_aux(self, insert, delete) } /// Query auxiliary data from key-value store. @@ -548,9 +564,10 @@ pub trait PrunableStateChangesTrieStorage: /// Get reference to StateChangesTrieStorage. fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; /// Get configuration at given block. - fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< - ChangesTrieConfigurationRange, Block::Hash> - >; + fn configuration_at( + &self, + at: &BlockId, + ) -> sp_blockchain::Result, Block::Hash>>; /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. /// It is guaranteed that we have no any changes tries before (and including) this block. /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). 
@@ -584,7 +601,8 @@ pub fn changes_tries_state_at_block<'a, Block: BlockT>( let config_range = storage.configuration_at(block)?; match config_range.config { - Some(config) => Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), + Some(config) => + Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), None => Ok(None), } } diff --git a/substrate/client/api/src/call_executor.rs b/substrate/client/api/src/call_executor.rs index 621cc292a71acf7cce6d370b295bce1811df909b..2d19c9fe3504ca37a0957c0df50bec5f30d128b9 100644 --- a/substrate/client/api/src/call_executor.rs +++ b/substrate/client/api/src/call_executor.rs @@ -18,20 +18,19 @@ //! A method call executor interface. -use std::{panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use sc_executor::{NativeVersion, RuntimeVersion}; +use sp_core::NativeOrEncoded; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor}, -}; -use sp_state_machine::{ - OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, + generic::BlockId, + traits::{Block as BlockT, HashFor}, }; -use sc_executor::{RuntimeVersion, NativeVersion}; -use sp_externalities::Extensions; -use sp_core::NativeOrEncoded; +use sp_state_machine::{ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof}; +use std::{cell::RefCell, panic::UnwindSafe, result}; -use sp_api::{ProofRecorder, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; +use sp_api::{ProofRecorder, StorageTransactionCache}; /// Executor Provider pub trait ExecutorProvider { @@ -73,7 +72,7 @@ pub trait CallExecutor { fn contextual_call< EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, @@ -83,14 +82,18 @@ pub trait CallExecutor { method: &str, call_data: &[u8], changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - StorageTransactionCache>::State>, - >>, + storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache>::State>, + >, + >, execution_manager: ExecutionManager, native_call: Option, proof_recorder: &Option>, extensions: Option, - ) -> sp_blockchain::Result> where ExecutionManager: Clone; + ) -> sp_blockchain::Result> + where + ExecutionManager: Clone; /// Extract RuntimeVersion of given block /// @@ -105,12 +108,13 @@ pub trait CallExecutor { mut state: S, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - sp_blockchain::Error::from_state(Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box<_>) - )?; + let trie_state = state.as_trie_backend().ok_or_else(|| { + sp_blockchain::Error::from_state(Box::new( + sp_state_machine::ExecutionError::UnableToGenerateProof, + ) as Box<_>) + })?; self.prove_at_trie_state(trie_state, overlay, method, call_data) } @@ -122,7 +126,7 @@ pub trait CallExecutor { trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; /// Get runtime version if supported. 
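Reviewer note: every hunk in this patch is mechanical rustfmt output; there are no logic changes. The import rewrites visible in each file are what crate-granularity import merging produces. A minimal sketch of that transformation, assuming `imports_granularity = "Crate"` and `reorder_imports = true` in the repository's rustfmt.toml (the config file itself is not part of this diff):

// Before formatting: one `use` item per path, in arbitrary order.
pub mod before_fmt {
    use std::sync::Arc;
    use std::collections::{HashMap, HashSet};

    pub fn state() -> Arc<HashMap<u64, HashSet<u64>>> {
        Arc::new(HashMap::new())
    }
}

// After formatting: imports merged per crate and sorted alphabetically.
pub mod after_fmt {
    use std::{
        collections::{HashMap, HashSet},
        sync::Arc,
    };

    pub fn state() -> Arc<HashMap<u64, HashSet<u64>>> {
        Arc::new(HashMap::new())
    }
}

The other recurring rewrites below (semicolons dropped from trailing `return None` / `break`, trailing commas added inside match arms, `where` clauses moved flush with the item) are likewise consistent with options such as `trailing_semicolon = false` and `match_block_trailing_comma = true`; treat those option names as assumptions about the config, not as changes introduced by this patch.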
diff --git a/substrate/client/api/src/cht.rs b/substrate/client/api/src/cht.rs index 96a5a272916e59efbadda891415d6d1eba7f50eb..50b54a17f8c0ac8eead373701f00ee011e6e4139 100644 --- a/substrate/client/api/src/cht.rs +++ b/substrate/client/api/src/cht.rs @@ -25,15 +25,15 @@ //! root hash. A correct proof implies that the claimed block is identical to the one //! we discarded. -use hash_db; use codec::Encode; +use hash_db; use sp_trie; -use sp_core::{H256, convert_hash}; -use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; +use sp_core::{convert_hash, H256}; +use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, + Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -49,17 +49,17 @@ pub fn size>() -> N { /// Returns Some(cht_number) if CHT is need to be built when the block with given number is canonized. pub fn is_build_required(cht_size: N, block_num: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; let two = N::one() + N::one(); if block_cht_num < two { - return None; + return None } let cht_start = start_number(cht_size, block_cht_num.clone()); if cht_start != block_num { - return None; + return None } Some(block_cht_num - two) @@ -67,13 +67,13 @@ pub fn is_build_required(cht_size: N, block_num: N) -> Option /// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number. pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; let two = N::one() + N::one(); if max_cht_number < two { - return None; + return None } Some(max_cht_number - two) } @@ -86,16 +86,16 @@ pub fn compute_root( cht_num: Header::Number, hashes: I, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root( - build_pairs::(cht_size, cht_num, hashes)? - )) + Ok(sp_trie::trie_types::Layout::::trie_root(build_pairs::( + cht_size, cht_num, hashes, + )?)) } /// Build CHT-based header proof. @@ -103,26 +103,28 @@ pub fn build_proof( cht_size: Header::Number, cht_num: Header::Number, blocks: BlocksI, - hashes: HashesI + hashes: HashesI, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, + BlocksI: IntoIterator, + HashesI: IntoIterator>>, { let transaction = build_pairs::(cht_size, cht_num, hashes)? 
.into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage.as_trie_backend() + let trie_storage = storage + .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( trie_storage, blocks.into_iter().map(|number| encode_cht_key(number)), - ).map_err(ClientError::from_state) + ) + .map_err(ClientError::from_state) } /// Check CHT-based header proof. @@ -132,25 +134,24 @@ pub fn check_proof( remote_hash: Header::Hash, remote_proof: StorageProof, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { do_check_proof::( local_root, local_number, remote_hash, - move |local_root, local_cht_key| + move |local_root, local_cht_key| { read_proof_check::( local_root, remote_proof, ::std::iter::once(local_cht_key), ) - .map(|mut map| map - .remove(local_cht_key) - .expect("checked proof of local_cht_key; qed")) - .map_err(ClientError::from_state), + .map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed")) + .map_err(ClientError::from_state) + }, ) } @@ -161,20 +162,19 @@ pub fn check_proof_on_proving_backend( remote_hash: Header::Hash, proving_backend: &TrieBackend, Hasher>, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { do_check_proof::( local_root, local_number, remote_hash, - |_, local_cht_key| - read_proof_check_on_proving_backend::( - proving_backend, - local_cht_key, - ).map_err(ClientError::from_state), + |_, local_cht_key| { + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + .map_err(ClientError::from_state) + }, ) } @@ -185,22 +185,22 @@ fn do_check_proof( remote_hash: Header::Hash, checker: F, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { let root: Hasher::Out = convert_hash(&local_root); let local_cht_key = encode_cht_key(local_number); let local_cht_value = checker(root, &local_cht_key)?; let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; - let local_hash = decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; + let local_hash = + decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; match &local_hash[..] == remote_hash.as_ref() { true => Ok(()), false => Err(ClientError::InvalidCHTProof.into()), } - } /// Group ordered blocks by CHT number and call functor with blocks of each group. @@ -210,29 +210,31 @@ pub fn for_each_cht_group( mut functor: F, mut functor_param: P, ) -> ClientResult<()> - where - Header: HeaderT, - I: IntoIterator, - F: FnMut(P, Header::Number, Vec) -> ClientResult
<P>
, +where + Header: HeaderT, + I: IntoIterator, + F: FnMut(P, Header::Number, Vec) -> ClientResult
<P>
, { let mut current_cht_num = None; let mut current_cht_blocks = Vec::new(); for block in blocks { - let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| ClientError::Backend(format!( - "Cannot compute CHT root for the block #{}", block)) - )?; + let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| { + ClientError::Backend(format!("Cannot compute CHT root for the block #{}", block)) + })?; let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); if advance_to_next_cht { - let current_cht_num = current_cht_num.expect("advance_to_next_cht is true; - it is true only when current_cht_num is Some; qed"); - assert!(new_cht_num > current_cht_num, "for_each_cht_group only supports ordered iterators"); - - functor_param = functor( - functor_param, - current_cht_num, - std::mem::take(&mut current_cht_blocks), - )?; + let current_cht_num = current_cht_num.expect( + "advance_to_next_cht is true; + it is true only when current_cht_num is Some; qed", + ); + assert!( + new_cht_num > current_cht_num, + "for_each_cht_group only supports ordered iterators" + ); + + functor_param = + functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; } current_cht_blocks.push(block); @@ -240,11 +242,7 @@ pub fn for_each_cht_group( } if let Some(current_cht_num) = current_cht_num { - functor( - functor_param, - current_cht_num, - std::mem::take(&mut current_cht_blocks), - )?; + functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; } Ok(()) @@ -254,26 +252,22 @@ pub fn for_each_cht_group( fn build_pairs( cht_size: Header::Number, cht_num: Header::Number, - hashes: I + hashes: I, ) -> ClientResult, Vec)>> - where - Header: HeaderT, - I: IntoIterator>>, +where + Header: HeaderT, + I: IntoIterator>>, { let start_num = start_number(cht_size, cht_num); let mut pairs = Vec::new(); let mut hash_index = Header::Number::zero(); for hash in hashes.into_iter() { - let hash = hash?.ok_or_else(|| ClientError::from( - ClientError::MissingHashRequiredForCHT - ))?; - pairs.push(( - encode_cht_key(start_num + hash_index).to_vec(), - encode_cht_value(hash) - )); + let hash = + hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?; + pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash))); hash_index += Header::Number::one(); if hash_index == cht_size { - break; + break } } @@ -325,7 +319,6 @@ pub fn decode_cht_value(value: &[u8]) -> Option { 32 => Some(H256::from_slice(&value[0..32])), _ => None, } - } #[cfg(test)] @@ -379,8 +372,12 @@ mod tests { #[test] fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::(SIZE as _, 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)).is_err()); + assert!(build_pairs::( + SIZE as _, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) + ) + .is_err()); } #[test] @@ -391,9 +388,12 @@ mod tests { ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) .take(SIZE as usize / 2) .chain(::std::iter::once(Ok(None))) - .chain(::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) - .take(SIZE as usize / 2 - 1)) - ).is_err()); + .chain( + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) + .take(SIZE as usize / 2 - 1) + ) + ) + .is_err()); } #[test] @@ -401,9 +401,9 @@ mod tests { assert!(compute_root::( SIZE as _, 42, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) 
- ).is_ok()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); } #[test] @@ -413,9 +413,9 @@ mod tests { SIZE as _, 0, vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_err()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_err()); } #[test] @@ -424,9 +424,9 @@ mod tests { SIZE as _, 0, vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_ok()); + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); } #[test] @@ -447,19 +447,27 @@ mod tests { let _ = for_each_cht_group::( cht_size, vec![ - cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5, - cht_size * 4 + 1, cht_size * 4 + 7, - cht_size * 6 + 1 - ], |_, cht_num, blocks| { + cht_size * 2 + 1, + cht_size * 2 + 2, + cht_size * 2 + 5, + cht_size * 4 + 1, + cht_size * 4 + 7, + cht_size * 6 + 1, + ], + |_, cht_num, blocks| { match cht_num { - 2 => assert_eq!(blocks, vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]), + 2 => assert_eq!( + blocks, + vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5] + ), 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), _ => unreachable!(), } Ok(()) - }, () + }, + (), ); } } diff --git a/substrate/client/api/src/client.rs b/substrate/client/api/src/client.rs index 3f4dfc8f35be10a4e2a44dde2a35229d40b718f4..69c89f1aa5f6f24a257b3c6db1cf069e9a3d7d78 100644 --- a/substrate/client/api/src/client.rs +++ b/substrate/client/api/src/client.rs @@ -18,20 +18,19 @@ //! A set of APIs supported by the client along with their primitives. -use std::{fmt, collections::HashSet, sync::Arc, convert::TryFrom}; +use sp_consensus::BlockOrigin; use sp_core::storage::StorageKey; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, generic::{BlockId, SignedBlock}, + traits::{Block as BlockT, NumberFor}, Justifications, }; -use sp_consensus::BlockOrigin; +use std::{collections::HashSet, convert::TryFrom, fmt, sync::Arc}; -use crate::blockchain::Info; -use crate::notifications::StorageEventStream; -use sp_utils::mpsc::TracingUnboundedReceiver; -use sp_blockchain; +use crate::{blockchain::Info, notifications::StorageEventStream}; use sc_transaction_pool_api::ChainEvent; +use sp_blockchain; +use sp_utils::mpsc::TracingUnboundedReceiver; /// Type that implements `futures::Stream` of block import events. pub type ImportNotifications = TracingUnboundedReceiver>; @@ -82,7 +81,7 @@ pub trait BlockBackend { /// Get block body by ID. Returns `None` if the body is not stored. fn block_body( &self, - id: &BlockId + id: &BlockId, ) -> sp_blockchain::Result::Extrinsic>>>; /// Get all indexed transactions for a block, @@ -99,7 +98,8 @@ pub trait BlockBackend { fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; /// Get block status. - fn block_status(&self, id: &BlockId) -> sp_blockchain::Result; + fn block_status(&self, id: &BlockId) + -> sp_blockchain::Result; /// Get block justifications for the block with the given id. fn justifications(&self, id: &BlockId) -> sp_blockchain::Result>; @@ -107,14 +107,11 @@ pub trait BlockBackend { /// Get block hash by number. fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result>; - /// Get single indexed transaction by content hash. + /// Get single indexed transaction by content hash. 
/// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn indexed_transaction( - &self, - hash: &Block::Hash, - ) -> sp_blockchain::Result>>; + fn indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result>>; /// Check if transaction index exists. fn has_indexed_transaction(&self, hash: &Block::Hash) -> sp_blockchain::Result { @@ -125,8 +122,11 @@ pub trait BlockBackend { /// Provide a list of potential uncle headers for a given block. pub trait ProvideUncles { /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) - -> sp_blockchain::Result>; + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result>; } /// Client info @@ -284,10 +284,7 @@ impl TryFrom> for ChainEvent { fn try_from(n: BlockImportNotification) -> Result { if n.is_new_best { - Ok(Self::NewBestBlock { - hash: n.hash, - tree_route: n.tree_route, - }) + Ok(Self::NewBestBlock { hash: n.hash, tree_route: n.tree_route }) } else { Err(()) } @@ -296,8 +293,6 @@ impl TryFrom> for ChainEvent { impl From> for ChainEvent { fn from(n: FinalityNotification) -> Self { - Self::Finalized { - hash: n.hash, - } + Self::Finalized { hash: n.hash } } } diff --git a/substrate/client/api/src/execution_extensions.rs b/substrate/client/api/src/execution_extensions.rs index fbde16afc795288857b4491cbfe3717fbebf4cce..ec44294b8a96cbe1c61c1deaba725f2448249e61 100644 --- a/substrate/client/api/src/execution_extensions.rs +++ b/substrate/client/api/src/execution_extensions.rs @@ -22,22 +22,19 @@ //! strategy for the runtime calls and provide the right `Externalities` //! extensions to support APIs for particular execution context & capabilities. -use std::sync::{Weak, Arc}; use codec::Decode; +use parking_lot::RwLock; +use sc_transaction_pool_api::OffchainSubmitTransaction; use sp_core::{ + offchain::{self, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, ExecutionContext, - offchain::{self, OffchainWorkerExt, TransactionPoolExt, OffchainDbExt}, }; +use sp_externalities::Extensions; use sp_keystore::{KeystoreExt, SyncCryptoStorePtr}; -use sp_runtime::{ - generic::BlockId, - traits, -}; -use sp_state_machine::{ExecutionManager, DefaultHandler}; +use sp_runtime::{generic::BlockId, traits}; pub use sp_state_machine::ExecutionStrategy; -use sp_externalities::Extensions; -use parking_lot::RwLock; -use sc_transaction_pool_api::OffchainSubmitTransaction; +use sp_state_machine::{DefaultHandler, ExecutionManager}; +use std::sync::{Arc, Weak}; /// Execution strategies settings. #[derive(Debug, Clone)] @@ -151,7 +148,8 @@ impl ExecutionExtensions { /// Register transaction pool extension. 
pub fn register_transaction_pool(&self, pool: &Arc) - where T: OffchainSubmitTransaction + 'static + where + T: OffchainSubmitTransaction + 'static, { *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _); } @@ -171,14 +169,10 @@ impl ExecutionExtensions { if capabilities.has(offchain::Capability::TransactionPool) { if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) { - extensions.register( - TransactionPoolExt( - Box::new(TransactionPoolAdapter { - at: *at, - pool, - }) as _ - ), - ); + extensions + .register(TransactionPoolExt( + Box::new(TransactionPoolAdapter { at: *at, pool }) as _, + )); } } @@ -186,19 +180,18 @@ impl ExecutionExtensions { capabilities.has(offchain::Capability::OffchainDbWrite) { if let Some(offchain_db) = self.offchain_db.as_ref() { - extensions.register( - OffchainDbExt::new(offchain::LimitedExternalities::new( - capabilities, - offchain_db.create(), - )) - ); + extensions.register(OffchainDbExt::new(offchain::LimitedExternalities::new( + capabilities, + offchain_db.create(), + ))); } } if let ExecutionContext::OffchainCall(Some(ext)) = context { - extensions.register( - OffchainWorkerExt::new(offchain::LimitedExternalities::new(capabilities, ext.0)), - ); + extensions.register(OffchainWorkerExt::new(offchain::LimitedExternalities::new( + capabilities, + ext.0, + ))); } extensions @@ -212,21 +205,14 @@ impl ExecutionExtensions { &self, at: &BlockId, context: ExecutionContext, - ) -> ( - ExecutionManager>, - Extensions, - ) { + ) -> (ExecutionManager>, Extensions) { let manager = match context { - ExecutionContext::BlockConstruction => - self.strategies.block_construction.get_manager(), - ExecutionContext::Syncing => - self.strategies.syncing.get_manager(), - ExecutionContext::Importing => - self.strategies.importing.get_manager(), + ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), + ExecutionContext::Syncing => self.strategies.syncing.get_manager(), + ExecutionContext::Importing => self.strategies.importing.get_manager(), ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => self.strategies.offchain_worker.get_manager(), - ExecutionContext::OffchainCall(_) => - self.strategies.other.get_manager(), + ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), }; (manager, self.extensions(at, context)) @@ -245,7 +231,7 @@ impl offchain::TransactionPool for TransactionPoolAdapter< Ok(xt) => xt, Err(e) => { log::warn!("Unable to decode extrinsic: {:?}: {}", data, e); - return Err(()); + return Err(()) }, }; diff --git a/substrate/client/api/src/in_mem.rs b/substrate/client/api/src/in_mem.rs index 505b69981694a0c8b9ff02eb03a447921953d355..e8fce19f8124e6c7ce8dc31feef1075840ec7e33 100644 --- a/substrate/client/api/src/in_mem.rs +++ b/substrate/client/api/src/in_mem.rs @@ -18,30 +18,31 @@ //! 
In memory client backend -use std::collections::{HashMap, HashSet}; -use std::ptr; -use std::sync::Arc; use parking_lot::RwLock; +use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use sp_core::{ - storage::well_known_keys, offchain::storage::InMemOffchainStorage as OffchainStorage, + offchain::storage::InMemOffchainStorage as OffchainStorage, storage::well_known_keys, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, + Justification, Justifications, Storage, }; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; -use sp_runtime::{Justification, Justifications, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, - ChildStorageCollection, IndexOperation, + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + IndexOperation, StorageCollection, +}; +use std::{ + collections::{HashMap, HashSet}, + ptr, + sync::Arc, }; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; use crate::{ backend::{self, NewBlockState, ProvideChtRoots}, - blockchain::{ - self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId - }, - UsageInfo, - light, + blockchain::{self, well_known_cache_keys::Id as CacheKeyId, BlockStatus, HeaderBackend}, leaves::LeafSet, + light, UsageInfo, }; struct PendingBlock { @@ -56,7 +57,11 @@ enum StoredBlock { } impl StoredBlock { - fn new(header: B::Header, body: Option>, just: Option) -> Self { + fn new( + header: B::Header, + body: Option>, + just: Option, + ) -> Self { match body { Some(body) => StoredBlock::Full(B::new(header, body), just), None => StoredBlock::Header(header, just), @@ -72,7 +77,7 @@ impl StoredBlock { fn justifications(&self) -> Option<&Justifications> { match *self { - StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), } } @@ -89,7 +94,7 @@ impl StoredBlock { StoredBlock::Full(block, just) => { let (header, body) = block.deconstruct(); (header, Some(body), just) - } + }, } } } @@ -123,9 +128,7 @@ impl Default for Blockchain { impl Clone for Blockchain { fn clone(&self) -> Self { let storage = Arc::new(RwLock::new(self.storage.read().clone())); - Blockchain { - storage, - } + Blockchain { storage } } } @@ -140,23 +143,20 @@ impl Blockchain { /// Create new in-memory blockchain storage. pub fn new() -> Blockchain { - let storage = Arc::new(RwLock::new( - BlockchainStorage { - blocks: HashMap::new(), - hashes: HashMap::new(), - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - header_cht_roots: HashMap::new(), - changes_trie_cht_roots: HashMap::new(), - leaves: LeafSet::new(), - aux: HashMap::new(), - })); - Blockchain { - storage, - } + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + header_cht_roots: HashMap::new(), + changes_trie_cht_roots: HashMap::new(), + leaves: LeafSet::new(), + aux: HashMap::new(), + })); + Blockchain { storage } } /// Insert a block header and associated data. 
@@ -175,8 +175,12 @@ impl Blockchain { { let mut storage = self.storage.write(); - storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone()); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justifications)); + storage + .leaves + .import(hash.clone(), number.clone(), header.parent_hash().clone()); + storage + .blocks + .insert(hash.clone(), StoredBlock::new(header, body, justifications)); if let NewBlockState::Final = new_state { storage.finalized_hash = hash; @@ -200,7 +204,7 @@ impl Blockchain { pub fn equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks } @@ -209,14 +213,14 @@ impl Blockchain { pub fn canon_equals_to(&self, other: &Self) -> bool { // Check ptr equality first to avoid double read locks. if ptr::eq(self, other) { - return true; + return true } let this = self.storage.read(); let other = other.storage.read(); - this.hashes == other.hashes - && this.best_hash == other.best_hash - && this.best_number == other.best_number - && this.genesis_hash == other.genesis_hash + this.hashes == other.hashes && + this.best_hash == other.best_hash && + this.best_number == other.best_number && + this.genesis_hash == other.genesis_hash } /// Insert header CHT root. @@ -226,7 +230,8 @@ impl Blockchain { /// Set an existing block as head. pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - let header = self.header(id)? + let header = self + .header(id)? .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))?; self.apply_head(&header) @@ -270,7 +275,11 @@ impl Blockchain { Ok(()) } - fn finalize_header(&self, id: BlockId, justification: Option) -> sp_blockchain::Result<()> { + fn finalize_header( + &self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { let hash = match self.header(id)? 
{ Some(h) => h.hash(), None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), @@ -280,11 +289,13 @@ impl Blockchain { storage.finalized_hash = hash; if justification.is_some() { - let block = storage.blocks.get_mut(&hash) + let block = storage + .blocks + .get_mut(&hash) .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { - StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, }; *block_justifications = justification.map(Justifications::from); @@ -293,9 +304,11 @@ impl Blockchain { Ok(()) } - fn append_justification(&self, id: BlockId, justification: Justification) - -> sp_blockchain::Result<()> - { + fn append_justification( + &self, + id: BlockId, + justification: Justification, + ) -> sp_blockchain::Result<()> { let hash = self.expect_block_hash_from_id(&id)?; let mut storage = self.storage.write(); @@ -305,14 +318,14 @@ impl Blockchain { .expect("hash was fetched from a block in the db; qed"); let block_justifications = match block { - StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, }; if let Some(stored_justifications) = block_justifications { if !stored_justifications.append(justification) { return Err(sp_blockchain::Error::BadJustification( - "Duplicate consensus engine ID".into() - )); + "Duplicate consensus engine ID".into(), + )) } } else { *block_justifications = Some(Justifications::from(justification)); @@ -333,10 +346,13 @@ impl Blockchain { } impl HeaderBackend for Blockchain { - fn header(&self, id: BlockId) -> sp_blockchain::Result::Header>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).map(|b| b.header().clone()) - })) + fn header( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Header>> { + Ok(self + .id(id) + .and_then(|hash| self.storage.read().blocks.get(&hash).map(|b| b.header().clone()))) } fn info(&self) -> blockchain::Info { @@ -352,7 +368,7 @@ impl HeaderBackend for Blockchain { } else { None }, - number_leaves: storage.leaves.count() + number_leaves: storage.leaves.count(), } } @@ -367,7 +383,10 @@ impl HeaderBackend for Blockchain { Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number())) } - fn hash(&self, number: <::Header as HeaderT>::Number) -> sp_blockchain::Result> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { Ok(self.id(BlockId::Number(number))) } } @@ -375,9 +394,15 @@ impl HeaderBackend for Blockchain { impl HeaderMetadata for Blockchain { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) - .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash))) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header(BlockId::hash(hash))? 
+ .map(|header| CachedHeaderMetadata::from(&header)) + .ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash)) + }) } fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata) { @@ -389,17 +414,27 @@ impl HeaderMetadata for Blockchain { } impl blockchain::Backend for Blockchain { - fn body(&self, id: BlockId) -> sp_blockchain::Result::Extrinsic>>> { + fn body( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash) + self.storage + .read() + .blocks + .get(&hash) .and_then(|b| b.extrinsics().map(|x| x.to_vec())) })) } fn justifications(&self, id: BlockId) -> sp_blockchain::Result> { - Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justifications().map(|x| x.clone())) - )) + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justifications().map(|x| x.clone())) + })) } fn last_finalized(&self) -> sp_blockchain::Result { @@ -418,16 +453,13 @@ impl blockchain::Backend for Blockchain { unimplemented!() } - fn indexed_transaction( - &self, - _hash: &Block::Hash, - ) -> sp_blockchain::Result>> { + fn indexed_transaction(&self, _hash: &Block::Hash) -> sp_blockchain::Result>> { unimplemented!("Not supported by the in-mem backend.") } fn block_indexed_body( &self, - _id: BlockId + _id: BlockId, ) -> sp_blockchain::Result>>> { unimplemented!("Not supported by the in-mem backend.") } @@ -444,9 +476,13 @@ impl backend::AuxStore for Blockchain { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { let mut storage = self.storage.write(); for (k, v) in insert { storage.aux.insert(k.to_vec(), v.to_vec()); @@ -463,8 +499,8 @@ impl backend::AuxStore for Blockchain { } impl light::Storage for Blockchain - where - Block::Hash: From<[u8; 32]>, +where + Block::Hash: From<[u8; 32]>, { fn import_header( &self, @@ -507,8 +543,14 @@ impl ProvideChtRoots for Blockchain { _cht_size: NumberFor, block: NumberFor, ) -> sp_blockchain::Result> { - self.storage.read().header_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block))) + self.storage + .read() + .header_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block)) + }) .map(Some) } @@ -517,8 +559,17 @@ impl ProvideChtRoots for Blockchain { _cht_size: NumberFor, block: NumberFor, ) -> sp_blockchain::Result> { - self.storage.read().changes_trie_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", block))) + self.storage + .read() + .changes_trie_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Changes trie CHT for block {} not exists", + block + )) + }) .map(Some) } } @@ -527,25 +578,30 @@ impl ProvideChtRoots for Blockchain { pub struct BlockImportOperation { pending_block: Option>, old_state: InMemoryBackend>, - new_state: Option<> as StateBackend>>::Transaction>, + new_state: + Option<> as StateBackend>>::Transaction>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, } -impl BlockImportOperation where +impl BlockImportOperation 
+where Block::Hash: Ord, { - fn apply_storage(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result { + fn apply_storage( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children_default.iter() - .map(|(_storage_key, child_content)| - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))) - ) - ); + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + ) + }); let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), @@ -559,7 +615,8 @@ impl BlockImportOperation where } } -impl backend::BlockImportOperation for BlockImportOperation where +impl backend::BlockImportOperation for BlockImportOperation +where Block::Hash: Ord, { type State = InMemoryBackend>; @@ -577,10 +634,8 @@ impl backend::BlockImportOperation for BlockImportOperatio state: NewBlockState, ) -> sp_blockchain::Result<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = Some(PendingBlock { - block: StoredBlock::new(header, body, justifications), - state, - }); + self.pending_block = + Some(PendingBlock { block: StoredBlock::new(header, body, justifications), state }); Ok(()) } @@ -601,7 +656,11 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> sp_blockchain::Result { + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + ) -> sp_blockchain::Result { self.apply_storage(storage, commit) } @@ -610,7 +669,8 @@ impl backend::BlockImportOperation for BlockImportOperatio } fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux.append(&mut ops.into_iter().collect()); Ok(()) @@ -639,7 +699,10 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn update_transaction_index(&mut self, _index: Vec) -> sp_blockchain::Result<()> { + fn update_transaction_index( + &mut self, + _index: Vec, + ) -> sp_blockchain::Result<()> { Ok(()) } } @@ -648,13 +711,19 @@ impl backend::BlockImportOperation for BlockImportOperatio /// /// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this /// > struct for testing purposes. Do **NOT** use in production. -pub struct Backend where Block::Hash: Ord { +pub struct Backend +where + Block::Hash: Ord, +{ states: RwLock>>>, blockchain: Blockchain, import_lock: RwLock<()>, } -impl Backend where Block::Hash: Ord { +impl Backend +where + Block::Hash: Ord, +{ /// Create a new instance of in-mem backend. 
pub fn new() -> Self { Backend { @@ -665,14 +734,21 @@ impl Backend where Block::Hash: Ord { } } -impl backend::AuxStore for Backend where Block::Hash: Ord { +impl backend::AuxStore for Backend +where + Block::Hash: Ord, +{ fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { self.blockchain.insert_aux(insert, delete) } @@ -681,7 +757,10 @@ impl backend::AuxStore for Backend where Block::Hash: Ord } } -impl backend::Backend for Backend where Block::Hash: Ord { +impl backend::Backend for Backend +where + Block::Hash: Ord, +{ type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; type State = InMemoryBackend>; @@ -708,10 +787,7 @@ impl backend::Backend for Backend where Block::Hash Ok(()) } - fn commit_operation( - &self, - operation: Self::BlockImportOperation, - ) -> sp_blockchain::Result<()> { + fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { self.blockchain.finalize_header(block, justification)?; @@ -779,13 +855,13 @@ impl backend::Backend for Backend where Block::Hash fn state_at(&self, block: BlockId) -> sp_blockchain::Result { match block { - BlockId::Hash(h) if h == Default::default() => { - return Ok(Self::State::default()); - }, + BlockId::Hash(h) if h == Default::default() => return Ok(Self::State::default()), _ => {}, } - self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) + self.blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) } @@ -797,10 +873,7 @@ impl backend::Backend for Backend where Block::Hash Ok((Zero::zero(), HashSet::new())) } - fn remove_leaf_block( - &self, - _hash: &Block::Hash, - ) -> sp_blockchain::Result<()> { + fn remove_leaf_block(&self, _hash: &Block::Hash) -> sp_blockchain::Result<()> { Ok(()) } @@ -811,9 +884,13 @@ impl backend::Backend for Backend where Block::Hash impl backend::LocalBackend for Backend where Block::Hash: Ord {} -impl backend::RemoteBackend for Backend where Block::Hash: Ord { +impl backend::RemoteBackend for Backend +where + Block::Hash: Ord, +{ fn is_local_state_available(&self, block: &BlockId) -> bool { - self.blockchain.expect_block_number_from_id(block) + self.blockchain + .expect_block_number_from_id(block) .map(|num| num.is_zero()) .unwrap_or(false) } @@ -826,12 +903,15 @@ impl backend::RemoteBackend for Backend where Block /// Check that genesis storage is valid. 
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::InvalidState.into()); + return Err(sp_blockchain::Error::InvalidState.into()) } - if storage.children_default.keys() - .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::InvalidState.into()); + if storage + .children_default + .keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) + { + return Err(sp_blockchain::Error::InvalidState.into()) } Ok(()) @@ -839,10 +919,10 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { #[cfg(test)] mod tests { - use crate::{NewBlockState, in_mem::Blockchain}; + use crate::{in_mem::Blockchain, NewBlockState}; use sp_api::{BlockId, HeaderT}; - use sp_runtime::{ConsensusEngineId, Justifications}; use sp_blockchain::Backend; + use sp_runtime::{ConsensusEngineId, Justifications}; use substrate_test_runtime::{Block, Header, H256}; pub const ID1: ConsensusEngineId = *b"TST1"; @@ -853,7 +933,13 @@ mod tests { 0 => Default::default(), _ => header(number - 1).hash(), }; - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) } fn test_blockchain() -> Blockchain { @@ -862,10 +948,18 @@ mod tests { let just1 = Some(Justifications::from((ID1, vec![1]))); let just2 = None; let just3 = Some(Justifications::from((ID1, vec![3]))); - blockchain.insert(header(0).hash(), header(0), just0, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), just1, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), just2, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), just3, None, NewBlockState::Final).unwrap(); + blockchain + .insert(header(0).hash(), header(0), just0, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(1).hash(), header(1), just1, None, NewBlockState::Final) + .unwrap(); + blockchain + .insert(header(2).hash(), header(2), just2, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert(header(3).hash(), header(3), just3, None, NewBlockState::Final) + .unwrap(); blockchain } diff --git a/substrate/client/api/src/leaves.rs b/substrate/client/api/src/leaves.rs index 0474d5bb8fe17178342b0f2be50a841cc8199f92..db5a25b451c5686d4e1489526ba129a5abadf22a 100644 --- a/substrate/client/api/src/leaves.rs +++ b/substrate/client/api/src/leaves.rs @@ -18,12 +18,11 @@ //! Helper for managing the set of available leaves in the chain for DB implementations. -use std::collections::BTreeMap; -use std::cmp::Reverse; +use codec::{Decode, Encode}; +use sp_blockchain::{Error, Result}; use sp_database::{Database, Transaction}; use sp_runtime::traits::AtLeast32Bit; -use codec::{Encode, Decode}; -use sp_blockchain::{Error, Result}; +use std::{cmp::Reverse, collections::BTreeMap}; type DbHash = sp_core::H256; @@ -57,7 +56,7 @@ impl FinalizationDisplaced { } /// Iterate over all displaced leaves. 
- pub fn leaves(&self) -> impl IntoIterator { + pub fn leaves(&self) -> impl IntoIterator { self.leaves.values().flatten() } } @@ -72,17 +71,14 @@ pub struct LeafSet { pending_removed: Vec, } -impl LeafSet where +impl LeafSet +where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { /// Construct a new, blank leaf set. pub fn new() -> Self { - Self { - storage: BTreeMap::new(), - pending_added: Vec::new(), - pending_removed: Vec::new(), - } + Self { storage: BTreeMap::new(), pending_added: Vec::new(), pending_removed: Vec::new() } } /// Read the leaf list from the DB, using given prefix for keys. @@ -98,14 +94,10 @@ impl LeafSet where for (number, hashes) in vals.into_iter() { storage.insert(Reverse(number), hashes); } - } + }, None => {}, } - Ok(Self { - storage, - pending_added: Vec::new(), - pending_removed: Vec::new(), - }) + Ok(Self { storage, pending_added: Vec::new(), pending_removed: Vec::new() }) } /// update the leaf list on import. returns a displaced leaf if there was one. @@ -119,10 +111,7 @@ impl LeafSet where self.pending_removed.push(parent_hash.clone()); Some(ImportDisplaced { new_hash: hash.clone(), - displaced: LeafSetItem { - hash: parent_hash, - number: new_number, - }, + displaced: LeafSetItem { hash: parent_hash, number: new_number }, }) } else { None @@ -144,16 +133,15 @@ impl LeafSet where /// will be pruned soon afterwards anyway. pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() }; + return FinalizationDisplaced { leaves: BTreeMap::new() } } else { number - N::one() }; let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed.extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); - FinalizationDisplaced { - leaves: below_boundary, - } + self.pending_removed + .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); + FinalizationDisplaced { leaves: below_boundary } } /// Undo all pending operations. @@ -169,7 +157,9 @@ impl LeafSet where /// Revert to the given block height by dropping all leaves in the leaf set /// with a block number higher than the target. pub fn revert(&mut self, best_hash: H, best_number: N) { - let items = self.storage.iter() + let items = self + .storage + .iter() .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) .collect::>(); @@ -185,7 +175,8 @@ impl LeafSet where } let best_number = Reverse(best_number); - let leaves_contains_best = self.storage + let leaves_contains_best = self + .storage .get(&best_number) .map_or(false, |hashes| hashes.contains(&best_hash)); @@ -209,7 +200,12 @@ impl LeafSet where } /// Write the leaf list to the database transaction. - pub fn prepare_transaction(&mut self, tx: &mut Transaction, column: u32, prefix: &[u8]) { + pub fn prepare_transaction( + &mut self, + tx: &mut Transaction, + column: u32, + prefix: &[u8], + ) { let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); tx.set_from_vec(column, prefix, leaves.encode()); self.pending_added.clear(); @@ -218,7 +214,9 @@ impl LeafSet where /// Check if given block is a leaf. 
pub fn contains(&self, number: N, hash: H) -> bool { - self.storage.get(&Reverse(number)).map_or(false, |hashes| hashes.contains(&hash)) + self.storage + .get(&Reverse(number)) + .map_or(false, |hashes| hashes.contains(&hash)) } fn insert_leaf(&mut self, number: Reverse, hash: H) { @@ -230,14 +228,18 @@ impl LeafSet where let mut empty = false; let removed = self.storage.get_mut(number).map_or(false, |leaves| { let mut found = false; - leaves.retain(|h| if h == hash { - found = true; - false - } else { - true + leaves.retain(|h| { + if h == hash { + found = true; + false + } else { + true + } }); - if leaves.is_empty() { empty = true } + if leaves.is_empty() { + empty = true + } found }); @@ -255,7 +257,8 @@ pub struct Undo<'a, H: 'a, N: 'a> { inner: &'a mut LeafSet, } -impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where +impl<'a, H: 'a, N: 'a> Undo<'a, H, N> +where H: Clone + PartialEq + Decode + Encode, N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { @@ -329,7 +332,7 @@ mod tests { fn two_leaves_same_height_can_be_included() { let mut set = LeafSet::new(); - set.import(1_1u32, 10u32,0u32); + set.import(1_1u32, 10u32, 0u32); set.import(1_2, 10, 0); assert!(set.storage.contains_key(&Reverse(10))); diff --git a/substrate/client/api/src/lib.rs b/substrate/client/api/src/lib.rs index 71cf499f7994355b4c8036cbd2c1229c46d21ff0..16935b1e846cf60b6d24d86a0d357c40df1fff86 100644 --- a/substrate/client/api/src/lib.rs +++ b/substrate/client/api/src/lib.rs @@ -21,30 +21,28 @@ pub mod backend; pub mod call_executor; -pub mod client; pub mod cht; +pub mod client; pub mod execution_extensions; pub mod in_mem; -pub mod light; pub mod leaves; +pub mod light; pub mod notifications; pub mod proof_provider; -pub use sp_blockchain as blockchain; pub use backend::*; -pub use notifications::*; pub use call_executor::*; pub use client::*; pub use light::*; pub use notifications::*; pub use proof_provider::*; +pub use sp_blockchain as blockchain; pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{StorageProof, ExecutionStrategy}; -pub use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; +pub use sp_state_machine::{ExecutionStrategy, StorageProof}; +pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; /// Usage Information Provider interface -/// pub trait UsageProvider { /// Get usage info about current client. fn usage_info(&self) -> ClientInfo; @@ -52,7 +50,7 @@ pub trait UsageProvider { /// Utility methods for the client. 
pub mod utils { - use sp_blockchain::{HeaderBackend, HeaderMetadata, Error}; + use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::Block as BlockT; use std::borrow::Borrow; @@ -66,19 +64,24 @@ pub mod utils { client: &'a T, current: Option<(Block::Hash, Block::Hash)>, ) -> impl Fn(&Block::Hash, &Block::Hash) -> Result + 'a - where T: HeaderBackend + HeaderMetadata, + where + T: HeaderBackend + HeaderMetadata, { move |base, hash| { - if base == hash { return Ok(false); } + if base == hash { + return Ok(false) + } let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); let mut hash = hash; if let Some((current_hash, current_parent_hash)) = current { - if base == current_hash { return Ok(false); } + if base == current_hash { + return Ok(false) + } if hash == current_hash { if base == current_parent_hash { - return Ok(true); + return Ok(true) } else { hash = current_parent_hash; } diff --git a/substrate/client/api/src/light.rs b/substrate/client/api/src/light.rs index a068e2d4a3417c8576a9964b23d75058de3e6f45..8638ddf741f3021d7da535f1e12f4fb4d2721d34 100644 --- a/substrate/client/api/src/light.rs +++ b/substrate/client/api/src/light.rs @@ -18,23 +18,26 @@ //! Substrate light client interfaces -use std::sync::Arc; -use std::collections::{BTreeMap, HashMap}; -use std::future::Future; +use std::{ + collections::{BTreeMap, HashMap}, + future::Future, + sync::Arc, +}; -use sp_runtime::{ - traits::{ - Block as BlockT, Header as HeaderT, NumberFor, - }, - generic::BlockId +use crate::{ + backend::{AuxStore, NewBlockState}, + ProvideChtRoots, UsageInfo, }; -use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; -use sp_state_machine::StorageProof; use sp_blockchain::{ - HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, - Error as ClientError, Result as ClientResult, + well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderBackend, + HeaderMetadata, Result as ClientResult, +}; +use sp_core::{storage::PrefixedStorageKey, ChangesTrieConfigurationRange}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, }; -use crate::{backend::{AuxStore, NewBlockState}, UsageInfo, ProvideChtRoots}; +use sp_state_machine::StorageProof; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -142,48 +145,48 @@ pub struct RemoteBodyRequest { /// is correct (see FetchedDataChecker) and return already checked data. pub trait Fetcher: Send + Sync { /// Remote header future. - type RemoteHeaderResult: Future> + Unpin + Send + 'static; + type RemoteHeaderResult: Future> + + Unpin + + Send + + 'static; /// Remote storage read future. - type RemoteReadResult: Future, Option>>, - ClientError, - >> + Unpin + Send + 'static; + type RemoteReadResult: Future, Option>>, ClientError>> + + Unpin + + Send + + 'static; /// Remote call result future. - type RemoteCallResult: Future, - ClientError, - >> + Unpin + Send + 'static; + type RemoteCallResult: Future, ClientError>> + Unpin + Send + 'static; /// Remote changes result future. - type RemoteChangesResult: Future, u32)>, - ClientError, - >> + Unpin + Send + 'static; + type RemoteChangesResult: Future, u32)>, ClientError>> + + Unpin + + Send + + 'static; /// Remote block body result future. - type RemoteBodyResult: Future, - ClientError, - >> + Unpin + Send + 'static; + type RemoteBodyResult: Future, ClientError>> + + Unpin + + Send + + 'static; /// Fetch remote header. 
- fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult; - /// Fetch remote storage value. - fn remote_read( + fn remote_header( &self, - request: RemoteReadRequest - ) -> Self::RemoteReadResult; + request: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult; + /// Fetch remote storage value. + fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult; /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadChildRequest, ) -> Self::RemoteReadResult; /// Fetch remote call result. fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed /// at a given blocks range. - fn remote_changes(&self, request: RemoteChangesRequest) -> Self::RemoteChangesResult; + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult; /// Fetch remote block body fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult; } @@ -222,20 +225,22 @@ pub trait FetchChecker: Send + Sync { fn check_changes_proof( &self, request: &RemoteChangesRequest, - proof: ChangesProof + proof: ChangesProof, ) -> ClientResult, u32)>>; /// Check remote body proof. fn check_body_proof( &self, request: &RemoteBodyRequest, - body: Vec + body: Vec, ) -> ClientResult>; } - /// Light client blockchain storage. -pub trait Storage: AuxStore + HeaderBackend - + HeaderMetadata + ProvideChtRoots +pub trait Storage: + AuxStore + + HeaderBackend + + HeaderMetadata + + ProvideChtRoots { /// Store new header. Should refuse to revert any finalized blocks. /// @@ -280,10 +285,10 @@ pub enum LocalOrRemote { /// locally, or fetches required data from remote node. pub trait RemoteBlockchain: Send + Sync { /// Get block header. - fn header(&self, id: BlockId) -> ClientResult, - >>; + fn header( + &self, + id: BlockId, + ) -> ClientResult>>; } /// Returns future that resolves header either locally, or remotely. @@ -295,11 +300,8 @@ pub fn future_header>( use futures::future::{ready, Either, FutureExt}; match blockchain.header(id) { - Ok(LocalOrRemote::Remote(request)) => Either::Left( - fetcher - .remote_header(request) - .then(|header| ready(header.map(Some))) - ), + Ok(LocalOrRemote::Remote(request)) => + Either::Left(fetcher.remote_header(request).then(|header| ready(header.map(Some)))), Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))), Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))), Err(err) => Either::Right(ready(Err(err))), @@ -308,11 +310,11 @@ pub fn future_header>( #[cfg(test)] pub mod tests { + use super::*; use futures::future::Ready; use parking_lot::Mutex; use sp_blockchain::Error as ClientError; - use sp_test_primitives::{Block, Header, Extrinsic}; - use super::*; + use sp_test_primitives::{Block, Extrinsic, Header}; #[derive(Debug, thiserror::Error)] #[error("Not implemented on test node")] @@ -322,12 +324,11 @@ pub mod tests { fn into(self) -> ClientError { ClientError::Application(Box::new(self)) } - } - + } + pub type OkCallFetcher = Mutex>; - fn not_implemented_in_tests() -> Ready> - { + fn not_implemented_in_tests() -> Ready> { futures::future::ready(Err(MockError.into())) } @@ -346,7 +347,10 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadChildRequest
<Header>
-	) -> Self::RemoteReadResult {
+	fn remote_read_child(
+		&self,
+		_request: RemoteReadChildRequest<Header>,
+	) -> Self::RemoteReadResult {
 		not_implemented_in_tests()
 	}

@@ -354,7 +358,10 @@ pub mod tests {
 	fn remote_call(&self, _request: RemoteCallRequest<Header>) -> Self::RemoteCallResult {
 		futures::future::ready(Ok((*self.lock()).clone()))
 	}

-	fn remote_changes(&self, _request: RemoteChangesRequest<Header>
+	fn remote_changes(
+		&self,
+		_request: RemoteChangesRequest<Header>
, + ) -> Self::RemoteChangesResult { not_implemented_in_tests() } diff --git a/substrate/client/api/src/notifications.rs b/substrate/client/api/src/notifications.rs index b043a332d667d8a1dfe3ab3071462610ee0acf33..3532568f9bd5d637926b5661d8bcd93466399cfe 100644 --- a/substrate/client/api/src/notifications.rs +++ b/substrate/client/api/src/notifications.rs @@ -19,15 +19,15 @@ //! Storage notifications use std::{ - collections::{HashSet, HashMap}, + collections::{HashMap, HashSet}, sync::Arc, }; -use fnv::{FnvHashSet, FnvHashMap}; -use sp_core::storage::{StorageKey, StorageData}; +use fnv::{FnvHashMap, FnvHashSet}; +use prometheus_endpoint::{register, CounterVec, Opts, Registry, U64}; +use sp_core::storage::{StorageData, StorageKey}; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; -use prometheus_endpoint::{Registry, CounterVec, Opts, U64, register}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; /// Storage change set #[derive(Debug)] @@ -40,29 +40,34 @@ pub struct StorageChangeSet { impl StorageChangeSet { /// Convert the change set into iterator over storage items. - pub fn iter<'a>(&'a self) - -> impl Iterator, &'a StorageKey, Option<&'a StorageData>)> + 'a { - let top = self.changes + pub fn iter<'a>( + &'a self, + ) -> impl Iterator, &'a StorageKey, Option<&'a StorageData>)> + 'a + { + let top = self + .changes .iter() .filter(move |&(key, _)| match self.filter { Some(ref filter) => filter.contains(key), None => true, }) - .map(move |(k,v)| (None, k, v.as_ref())); - let children = self.child_changes + .map(move |(k, v)| (None, k, v.as_ref())); + let children = self + .child_changes .iter() - .filter_map(move |(sk, changes)| - self.child_filters.as_ref().and_then(|cf| - cf.get(sk).map(|filter| changes + .filter_map(move |(sk, changes)| { + self.child_filters.as_ref().and_then(|cf| { + cf.get(sk).map(|filter| { + changes .iter() .filter(move |&(key, _)| match filter { Some(ref filter) => filter.contains(key), None => true, }) - .map(move |(k,v)| (Some(sk), k, v.as_ref())) - ) - ) - ) + .map(move |(k, v)| (Some(sk), k, v.as_ref())) + }) + }) + }) .flatten(); top.chain(children) } @@ -82,15 +87,18 @@ pub struct StorageNotifications { next_id: SubscriberId, wildcard_listeners: FnvHashSet, listeners: HashMap>, - child_listeners: HashMap>, - FnvHashSet - )>, - sinks: FnvHashMap, - Option>, - Option>>>, - )>, + child_listeners: HashMap< + StorageKey, + (HashMap>, FnvHashSet), + >, + sinks: FnvHashMap< + SubscriberId, + ( + TracingUnboundedSender<(Block::Hash, StorageChangeSet)>, + Option>, + Option>>>, + ), + >, } impl Default for StorageNotifications { @@ -110,16 +118,17 @@ impl StorageNotifications { /// Initialize a new StorageNotifications /// optionally pass a prometheus registry to send subscriber metrics to pub fn new(prometheus_registry: Option) -> Self { - let metrics = prometheus_registry.and_then(|r| + let metrics = prometheus_registry.and_then(|r| { CounterVec::new( Opts::new( "storage_notification_subscribers", - "Number of subscribers in storage notification sytem" + "Number of subscribers in storage notification sytem", ), - &["action"], //added | removed - ).and_then(|g| register(g, &r)) + &["action"], // added | removed + ) + .and_then(|g| register(g, &r)) .ok() - ); + }); StorageNotifications { metrics, @@ -137,17 +146,16 @@ impl StorageNotifications { pub fn trigger( &mut self, hash: &Block::Hash, - changeset: impl Iterator, Option>)>, + changeset: 
impl Iterator, Option>)>, child_changeset: impl Iterator< - Item=(Vec, impl Iterator, Option>)>) + Item = (Vec, impl Iterator, Option>)>), >, ) { - let has_wildcard = !self.wildcard_listeners.is_empty(); // early exit if no listeners if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { - return; + return } let mut subscribers = self.wildcard_listeners.clone(); @@ -193,24 +201,29 @@ impl StorageNotifications { // Don't send empty notifications if changes.is_empty() && child_changes.is_empty() { - return; + return } let changes = Arc::new(changes); let child_changes = Arc::new(child_changes); // Trigger the events - let to_remove = self.sinks + let to_remove = self + .sinks .iter() .filter_map(|(subscriber, &(ref sink, ref filter, ref child_filters))| { let should_remove = { if subscribers.contains(subscriber) { - sink.unbounded_send((hash.clone(), StorageChangeSet { - changes: changes.clone(), - child_changes: child_changes.clone(), - filter: filter.clone(), - child_filters: child_filters.clone(), - })).is_err() + sink.unbounded_send(( + hash.clone(), + StorageChangeSet { + changes: changes.clone(), + child_changes: child_changes.clone(), + filter: filter.clone(), + child_filters: child_filters.clone(), + }, + )) + .is_err() } else { sink.is_closed() } @@ -221,7 +234,8 @@ impl StorageNotifications { } else { None } - }).collect::>(); + }) + .collect::>(); for sub_id in to_remove { self.remove_subscriber(sub_id); @@ -233,13 +247,12 @@ impl StorageNotifications { filters: &Option>, listeners: &mut HashMap>, wildcards: &mut FnvHashSet, - ){ + ) { match filters { None => { wildcards.remove(subscriber); }, - Some(filters) => { - + Some(filters) => for key in filters.iter() { let remove_key = match listeners.get_mut(key) { Some(ref mut set) => { @@ -252,8 +265,7 @@ impl StorageNotifications { if remove_key { listeners.remove(key); } - } - } + }, } } @@ -267,7 +279,6 @@ impl StorageNotifications { ); if let Some(child_filters) = child_filters.as_ref() { for (c_key, filters) in child_filters { - if let Some((listeners, wildcards)) = self.child_listeners.get_mut(&c_key) { Self::remove_subscriber_from( &subscriber, @@ -293,20 +304,24 @@ impl StorageNotifications { filter_keys: &Option>, listeners: &mut HashMap>, wildcards: &mut FnvHashSet, - ) -> Option> - { + ) -> Option> { match filter_keys { None => { wildcards.insert(current_id); None }, - Some(keys) => Some(keys.as_ref().iter().map(|key| { - listeners - .entry(key.clone()) - .or_insert_with(Default::default) - .insert(current_id); - key.clone() - }).collect()) + Some(keys) => Some( + keys.as_ref() + .iter() + .map(|key| { + listeners + .entry(key.clone()) + .or_insert_with(Default::default) + .insert(current_id); + key.clone() + }) + .collect(), + ), } } @@ -327,21 +342,20 @@ impl StorageNotifications { &mut self.wildcard_listeners, ); let child_keys = filter_child_keys.map(|filter_child_keys| { - filter_child_keys.iter().map(|(c_key, o_keys)| { - let (c_listeners, c_wildcards) = self.child_listeners - .entry(c_key.clone()) - .or_insert_with(Default::default); - - (c_key.clone(), Self::listen_from( - current_id, - o_keys, - &mut *c_listeners, - &mut *c_wildcards, - )) - }).collect() + filter_child_keys + .iter() + .map(|(c_key, o_keys)| { + let (c_listeners, c_wildcards) = + self.child_listeners.entry(c_key.clone()).or_insert_with(Default::default); + + ( + c_key.clone(), + Self::listen_from(current_id, o_keys, &mut *c_listeners, &mut *c_wildcards), + ) + }) + .collect() }); - // insert sink let (tx, rx) = 
tracing_unbounded("mpsc_storage_notification_items"); self.sinks.insert(current_id, (tx, keys, child_keys)); @@ -356,8 +370,8 @@ impl StorageNotifications { #[cfg(test)] mod tests { - use sp_runtime::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; use std::iter::{empty, Empty}; type TestChangeSet = ( @@ -369,10 +383,12 @@ mod tests { impl From for StorageChangeSet { fn from(changes: TestChangeSet) -> Self { // warning hardcoded child trie wildcard to test upon - let child_filters = Some([ - (StorageKey(vec![4]), None), - (StorageKey(vec![5]), None), - ].iter().cloned().collect()); + let child_filters = Some( + [(StorageKey(vec![4]), None), (StorageKey(vec![5]), None)] + .iter() + .cloned() + .collect(), + ); StorageChangeSet { changes: Arc::new(changes.0), child_changes: Arc::new(changes.1), @@ -396,34 +412,40 @@ mod tests { // given let mut notifications = StorageNotifications::::default(); let child_filter = [(StorageKey(vec![4]), None)]; - let mut recv = futures::executor::block_on_stream( - notifications.listen(None, Some(&child_filter[..])) - ); + let mut recv = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter[..]))); // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![3], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; + let changeset = vec![(vec![2], Some(vec![3])), (vec![3], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; let c_changeset = vec![(vec![4], c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), ); // then - assert_eq!(recv.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - (StorageKey(vec![3]), None), - ], vec![(StorageKey(vec![4]), vec![ - (StorageKey(vec![5]), Some(StorageData(vec![4]))), - (StorageKey(vec![6]), None), - ])]).into())); + assert_eq!( + recv.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![ + (StorageKey(vec![2]), Some(StorageData(vec![3]))), + (StorageKey(vec![3]), None), + ], + vec![( + StorageKey(vec![4]), + vec![ + (StorageKey(vec![5]), Some(StorageData(vec![4]))), + (StorageKey(vec![6]), None), + ] + )] + ) + .into() + ) + ); } #[test] @@ -432,44 +454,52 @@ mod tests { let mut notifications = StorageNotifications::::default(); let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; let mut recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) + notifications.listen(Some(&[StorageKey(vec![1])]), None), ); let mut recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) + notifications.listen(Some(&[StorageKey(vec![2])]), None), ); let mut recv3 = futures::executor::block_on_stream( - notifications.listen(Some(&[]), Some(&child_filter)) + notifications.listen(Some(&[]), Some(&child_filter)), ); // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; let c_changeset = vec![(vec![4], c_changeset_1)]; notifications.trigger( &Hash::from_low_u64_be(1), 
changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), ); // then - assert_eq!(recv1.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![1]), None), - ], vec![]).into())); - assert_eq!(recv2.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - ], vec![]).into())); - assert_eq!(recv3.next().unwrap(), (Hash::from_low_u64_be(1), (vec![], - vec![ - (StorageKey(vec![4]), vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))]), - ]).into())); - + assert_eq!( + recv1.next().unwrap(), + (Hash::from_low_u64_be(1), (vec![(StorageKey(vec![1]), None),], vec![]).into()) + ); + assert_eq!( + recv2.next().unwrap(), + ( + Hash::from_low_u64_be(1), + (vec![(StorageKey(vec![2]), Some(StorageData(vec![3]))),], vec![]).into() + ) + ); + assert_eq!( + recv3.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![], + vec![( + StorageKey(vec![4]), + vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))] + ),] + ) + .into() + ) + ); } #[test] @@ -479,27 +509,21 @@ mod tests { { let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; let _recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) + notifications.listen(Some(&[StorageKey(vec![1])]), None), ); let _recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) - ); - let _recv3 = futures::executor::block_on_stream( - notifications.listen(None, None) - ); - let _recv4 = futures::executor::block_on_stream( - notifications.listen(None, Some(&child_filter)) + notifications.listen(Some(&[StorageKey(vec![2])]), None), ); + let _recv3 = futures::executor::block_on_stream(notifications.listen(None, None)); + let _recv4 = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter))); assert_eq!(notifications.listeners.len(), 2); assert_eq!(notifications.wildcard_listeners.len(), 2); assert_eq!(notifications.child_listeners.len(), 1); } // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; let c_changeset = empty::<(_, Empty<_>)>(); notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); diff --git a/substrate/client/api/src/proof_provider.rs b/substrate/client/api/src/proof_provider.rs index 0e9fd5318ba905cc398762d3e2e394bda19d570e..ad0989c743961462beb84c35347810b9a5b0ef37 100644 --- a/substrate/client/api/src/proof_provider.rs +++ b/substrate/client/api/src/proof_provider.rs @@ -17,12 +17,9 @@ // along with this program. If not, see . //! Proof utilities -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT}, -}; -use crate::{StorageProof, ChangesProof}; -use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use crate::{ChangesProof, StorageProof}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey}; /// Interface for providing block proving utilities. 
pub trait ProofProvider { @@ -30,7 +27,7 @@ pub trait ProofProvider { fn read_proof( &self, id: &BlockId, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result; /// Reads child storage value at a given block + storage_key + key, returning @@ -39,7 +36,7 @@ pub trait ProofProvider { &self, id: &BlockId, child_info: &ChildInfo, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result; /// Execute a call to a contract on top of state in a block of given hash @@ -53,7 +50,10 @@ pub trait ProofProvider { call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)>; /// Reads given header and generates CHT-based header proof. - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)>; /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using diff --git a/substrate/client/authority-discovery/src/interval.rs b/substrate/client/authority-discovery/src/interval.rs index 0710487203d5360f556c62ae621744851301f6aa..f4e7c43e60d215021d734d2abe5e30c28dc28d10 100644 --- a/substrate/client/authority-discovery/src/interval.rs +++ b/substrate/client/authority-discovery/src/interval.rs @@ -16,13 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use futures::stream::Stream; -use futures::future::FutureExt; -use futures::ready; +use futures::{future::FutureExt, ready, stream::Stream}; use futures_timer::Delay; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; /// Exponentially increasing interval /// @@ -37,11 +37,7 @@ impl ExpIncInterval { /// Create a new [`ExpIncInterval`]. pub fn new(start: Duration, max: Duration) -> Self { let delay = Delay::new(start); - Self { - max, - next: start * 2, - delay, - } + Self { max, next: start * 2, delay } } /// Fast forward the exponentially increasing interval to the configured maximum. diff --git a/substrate/client/authority-discovery/src/lib.rs b/substrate/client/authority-discovery/src/lib.rs index ab6338963da46f9ea70b03da3bbd76c2a94ca8c1..4929ce69917a000ed1ee464e54925e56ccf7416f 100644 --- a/substrate/client/authority-discovery/src/lib.rs +++ b/substrate/client/authority-discovery/src/lib.rs @@ -26,18 +26,23 @@ //! //! See [`Worker`] and [`Service`] for more documentation. 
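The `interval.rs` hunk above only shows the reorganized imports and the `Self { max, next: start * 2, delay }` constructor of `ExpIncInterval`. For context, a sketch of how such an exponentially increasing interval stream can be implemented; the `poll_next` body is not part of this diff, so treat it as an assumption consistent with the constructor:

```rust
use futures::{ready, stream::Stream, FutureExt};
use futures_timer::Delay;
use std::{
	pin::Pin,
	task::{Context, Poll},
	time::Duration,
};

/// Each tick doubles the next delay until `max` is reached.
struct ExpIncInterval {
	max: Duration,
	next: Duration,
	delay: Delay,
}

impl ExpIncInterval {
	fn new(start: Duration, max: Duration) -> Self {
		Self { max, next: start * 2, delay: Delay::new(start) }
	}
}

impl Stream for ExpIncInterval {
	type Item = ();

	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<()>> {
		// Wait for the current delay to elapse, then arm the next, longer one.
		ready!(self.delay.poll_unpin(cx));
		let next = self.next;
		self.delay = Delay::new(next);
		self.next = std::cmp::min(next * 2, self.max);
		Poll::Ready(Some(()))
	}
}
```

This shape explains the comment in the worker below: retries start aggressively (2 seconds) and back off toward the configured maximum, so a constant retry timer is never needed.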
-pub use crate::{service::Service, worker::{NetworkProvider, Worker, Role}}; +pub use crate::{ + service::Service, + worker::{NetworkProvider, Role, Worker}, +}; use std::{sync::Arc, time::Duration}; -use futures::channel::{mpsc, oneshot}; -use futures::Stream; +use futures::{ + channel::{mpsc, oneshot}, + Stream, +}; use sc_client_api::blockchain::HeaderBackend; use sc_network::{DhtEvent, Multiaddr, PeerId}; +use sp_api::ProvideRuntimeApi; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId}; use sp_runtime::traits::Block as BlockT; -use sp_api::ProvideRuntimeApi; mod error; mod interval; @@ -141,15 +146,8 @@ where { let (to_worker, from_service) = mpsc::channel(0); - let worker = Worker::new( - from_service, - client, - network, - dht_event_rx, - role, - prometheus_registry, - config, - ); + let worker = + Worker::new(from_service, client, network, dht_event_rx, role, prometheus_registry, config); let service = Service::new(to_worker); (worker, service) @@ -160,5 +158,5 @@ pub(crate) enum ServicetoWorkerMsg { /// See [`Service::get_addresses_by_authority_id`]. GetAddressesByAuthorityId(AuthorityId, oneshot::Sender>>), /// See [`Service::get_authority_id_by_peer_id`]. - GetAuthorityIdByPeerId(PeerId, oneshot::Sender>) + GetAuthorityIdByPeerId(PeerId, oneshot::Sender>), } diff --git a/substrate/client/authority-discovery/src/service.rs b/substrate/client/authority-discovery/src/service.rs index a787ff8f51c2139d3c93ddfa324296c9f14d076f..2e5ae66e4dd4a0e193af551082b77a42aac8d0e0 100644 --- a/substrate/client/authority-discovery/src/service.rs +++ b/substrate/client/authority-discovery/src/service.rs @@ -20,8 +20,10 @@ use std::fmt::Debug; use crate::ServicetoWorkerMsg; -use futures::channel::{mpsc, oneshot}; -use futures::SinkExt; +use futures::{ + channel::{mpsc, oneshot}, + SinkExt, +}; use sc_network::{Multiaddr, PeerId}; use sp_authority_discovery::AuthorityId; @@ -42,9 +44,7 @@ impl Debug for Service { /// [`crate::Worker`]'s local address cache for a given [`AuthorityId`]. impl Service { pub(crate) fn new(to_worker: mpsc::Sender) -> Self { - Self { - to_worker, - } + Self { to_worker } } /// Get the addresses for the given [`AuthorityId`] from the local address @@ -59,7 +59,10 @@ impl Service { /// enforced today, given that there are still authorities out there /// publishing the addresses of their sentry nodes on the DHT. In the future /// this guarantee can be provided. - pub async fn get_addresses_by_authority_id(&mut self, authority: AuthorityId) -> Option> { + pub async fn get_addresses_by_authority_id( + &mut self, + authority: AuthorityId, + ) -> Option> { let (tx, rx) = oneshot::channel(); self.to_worker diff --git a/substrate/client/authority-discovery/src/tests.rs b/substrate/client/authority-discovery/src/tests.rs index 78e978e07a1a0418f1f6705b56f32e236ddf7e5e..ef2c2f24634b94418879fdc8b533574a7ff15d5b 100644 --- a/substrate/client/authority-discovery/src/tests.rs +++ b/substrate/client/authority-discovery/src/tests.rs @@ -16,15 +16,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
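The `new_worker_and_service` split in `lib.rs` above relies on a common channel topology: the service holds the `mpsc` sender, and every request carries its own `oneshot::Sender` for the reply, which is how `ServicetoWorkerMsg` is shaped. A runnable sketch of that request/response pattern, with hypothetical message and key types:

```rust
use futures::{
	channel::{mpsc, oneshot},
	SinkExt, StreamExt,
};

/// Hypothetical stand-in for `ServicetoWorkerMsg`: the reply channel travels
/// inside the request itself.
enum Request {
	GetAddresses(String, oneshot::Sender<Option<Vec<String>>>),
}

async fn demo() {
	// Capacity 0 keeps back-pressure tight, as in `mpsc::channel(0)` above.
	let (mut to_worker, mut from_service) = mpsc::channel::<Request>(0);

	let worker = async move {
		while let Some(Request::GetAddresses(authority, reply)) = from_service.next().await {
			// A real worker would consult its address cache here.
			let _ = reply.send(Some(vec![format!("/dns/{authority}/tcp/30333")]));
		}
	};

	let service = async move {
		let (tx, rx) = oneshot::channel();
		to_worker.send(Request::GetAddresses("alice".into(), tx)).await.unwrap();
		assert!(rx.await.unwrap().is_some());
		// Dropping `to_worker` here ends the worker loop.
	};

	futures::join!(worker, service);
}

fn main() {
	futures::executor::block_on(demo());
}
```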
-use crate::{new_worker_and_service, worker::{tests::{TestApi, TestNetwork}, Role}}; +use crate::{ + new_worker_and_service, + worker::{ + tests::{TestApi, TestNetwork}, + Role, + }, +}; -use std::sync::Arc; use futures::{channel::mpsc::channel, executor::LocalPool, task::LocalSpawn}; -use libp2p::core::{multiaddr::{Multiaddr, Protocol}, PeerId}; +use libp2p::core::{ + multiaddr::{Multiaddr, Protocol}, + PeerId, +}; +use std::sync::Arc; use sp_authority_discovery::AuthorityId; use sp_core::crypto::key_types; -use sp_keystore::{CryptoStore, testing::KeyStore}; +use sp_keystore::{testing::KeyStore, CryptoStore}; #[test] fn get_addresses_and_authority_id() { @@ -44,13 +53,12 @@ fn get_addresses_and_authority_id() { }); let remote_peer_id = PeerId::random(); - let remote_addr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + let remote_addr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() .unwrap() .with(Protocol::P2p(remote_peer_id.clone().into())); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let (mut worker, mut service) = new_worker_and_service( test_api, diff --git a/substrate/client/authority-discovery/src/worker.rs b/substrate/client/authority-discovery/src/worker.rs index bb9207e4e7ea639a2c0b7ac09bc82b7885dcbb77..905d17c72c0412dedee96e3c68f4aa5a82c68a38 100644 --- a/substrate/client/authority-discovery/src/worker.rs +++ b/substrate/client/authority-discovery/src/worker.rs @@ -16,43 +16,49 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{error::{Error, Result}, interval::ExpIncInterval, ServicetoWorkerMsg}; +use crate::{ + error::{Error, Result}, + interval::ExpIncInterval, + ServicetoWorkerMsg, +}; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::Duration; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + marker::PhantomData, + sync::Arc, + time::Duration, +}; -use futures::channel::mpsc; -use futures::{future, FutureExt, Stream, StreamExt, stream::Fuse}; +use futures::{channel::mpsc, future, stream::Fuse, FutureExt, Stream, StreamExt}; use addr_cache::AddrCache; use async_trait::async_trait; use codec::Decode; use ip_network::IpNetwork; -use libp2p::{core::multiaddr, multihash::{Multihash, Hasher}}; +use libp2p::{ + core::multiaddr, + multihash::{Hasher, Multihash}, +}; use log::{debug, error, log_enabled}; -use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; +use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use rand::{seq::SliceRandom, thread_rng}; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{ - DhtEvent, - ExHashT, - Multiaddr, - NetworkStateInfo, - PeerId, +use sc_network::{DhtEvent, ExHashT, Multiaddr, NetworkStateInfo, PeerId}; +use sp_api::ProvideRuntimeApi; +use sp_authority_discovery::{ + AuthorityDiscoveryApi, AuthorityId, AuthorityPair, AuthoritySignature, }; -use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_keystore::CryptoStore; -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; -use sp_api::ProvideRuntimeApi; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; mod addr_cache; /// Dht payload schemas generated from Protobuf definitions via Prost crate in build.rs. 
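The `schema` module below pulls in Prost-generated bindings from `OUT_DIR`, as its doc comment says. For reference, a build script that produces such a module looks roughly like this; the proto file path is an assumption, not taken from this diff:

```rust
// build.rs (sketch). Prost names the generated file after the proto `package`
// declaration, so `package authority_discovery;` yields
// $OUT_DIR/authority_discovery.rs, matching the include! in `mod schema`.
fn main() {
	prost_build::compile_protos(
		&["src/worker/schema/dht.proto"], // hypothetical path
		&["src/worker/schema"],
	)
	.unwrap();
}
```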
-mod schema { include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } +mod schema { + include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); +} #[cfg(test)] pub mod tests; @@ -72,7 +78,6 @@ pub enum Role { Discover, } - /// An authority discovery [`Worker`] can publish the local node's addresses as well as discover /// those of other nodes via a Kademlia DHT. /// @@ -141,8 +146,7 @@ where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { /// Construct a [`Worker`]. @@ -161,33 +165,24 @@ where // thus timely retries are not needed. For this reasoning use an exponentially increasing // interval for `publish_interval`, `query_interval` and `priority_group_set_interval` // instead of a constant interval. - let publish_interval = ExpIncInterval::new( - Duration::from_secs(2), - config.max_publish_interval, - ); - let query_interval = ExpIncInterval::new( - Duration::from_secs(2), - config.max_query_interval, - ); + let publish_interval = + ExpIncInterval::new(Duration::from_secs(2), config.max_publish_interval); + let query_interval = ExpIncInterval::new(Duration::from_secs(2), config.max_query_interval); // An `ExpIncInterval` is overkill here because the interval is constant, but consistency // is more simple. - let publish_if_changed_interval = ExpIncInterval::new( - config.keystore_refresh_interval, - config.keystore_refresh_interval - ); + let publish_if_changed_interval = + ExpIncInterval::new(config.keystore_refresh_interval, config.keystore_refresh_interval); let addr_cache = AddrCache::new(); let metrics = match prometheus_registry { - Some(registry) => { - match Metrics::register(®istry) { - Ok(metrics) => Some(metrics), - Err(e) => { - error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); - None - }, - } + Some(registry) => match Metrics::register(®istry) { + Ok(metrics) => Some(metrics), + Err(e) => { + error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + }, }, None => None, }; @@ -262,23 +257,23 @@ where let _ = sender.send( self.addr_cache.get_addresses_by_authority_id(&authority).map(Clone::clone), ); - } + }, ServicetoWorkerMsg::GetAuthorityIdByPeerId(peer_id, sender) => { - let _ = sender.send( - self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone), - ); - } + let _ = sender + .send(self.addr_cache.get_authority_id_by_peer_id(&peer_id).map(Clone::clone)); + }, } } fn addresses_to_publish(&self) -> impl Iterator { let peer_id: Multihash = self.network.local_peer_id().into(); let publish_non_global_ips = self.publish_non_global_ips; - self.network.external_addresses() + self.network + .external_addresses() .into_iter() .filter(move |a| { if publish_non_global_ips { - return true; + return true } a.iter().all(|p| match p { @@ -321,9 +316,9 @@ where if let Some(metrics) = &self.metrics { metrics.publish.inc(); - metrics.amount_addresses_last_published.set( - addresses.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .amount_addresses_last_published + .set(addresses.len().try_into().unwrap_or(std::u64::MAX)); } let mut serialized_addresses = vec![]; @@ -332,30 +327,26 @@ where .map_err(Error::EncodingProto)?; let keys_vec = keys.iter().cloned().collect::>(); - let signatures = key_store.sign_with_all( - key_types::AUTHORITY_DISCOVERY, - keys_vec.clone(), - serialized_addresses.as_slice(), - ).await.map_err(|_| 
Error::Signing)?; + let signatures = key_store + .sign_with_all( + key_types::AUTHORITY_DISCOVERY, + keys_vec.clone(), + serialized_addresses.as_slice(), + ) + .await + .map_err(|_| Error::Signing)?; for (sign_result, key) in signatures.into_iter().zip(keys_vec.iter()) { let mut signed_addresses = vec![]; // Verify that all signatures exist for all provided keys. - let signature = sign_result.ok() - .flatten() - .ok_or_else(|| Error::MissingSignature(key.clone()))?; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses) + let signature = + sign_result.ok().flatten().ok_or_else(|| Error::MissingSignature(key.clone()))?; + schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), signature } + .encode(&mut signed_addresses) .map_err(Error::EncodingProto)?; - self.network.put_value( - hash_authority_id(key.1.as_ref()), - signed_addresses, - ); + self.network.put_value(hash_authority_id(key.1.as_ref()), signed_addresses); } self.latest_published_keys = keys; @@ -367,11 +358,11 @@ where let id = BlockId::hash(self.client.info().best_hash); let local_keys = match &self.role { - Role::PublishAndDiscover(key_store) => { - key_store.sr25519_public_keys( - key_types::AUTHORITY_DISCOVERY - ).await.into_iter().collect::>() - }, + Role::PublishAndDiscover(key_store) => key_store + .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) + .await + .into_iter() + .collect::>(), Role::Discover => HashSet::new(), }; @@ -393,9 +384,9 @@ where self.in_flight_lookups.clear(); if let Some(metrics) = &self.metrics { - metrics.requests_pending.set( - self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .requests_pending + .set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX)); } Ok(()) @@ -408,15 +399,14 @@ where None => return, }; let hash = hash_authority_id(authority_id.as_ref()); - self.network - .get_value(&hash); + self.network.get_value(&hash); self.in_flight_lookups.insert(hash, authority_id); if let Some(metrics) = &self.metrics { metrics.requests.inc(); - metrics.requests_pending.set( - self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX), - ); + metrics + .requests_pending + .set(self.pending_lookups.len().try_into().unwrap_or(std::u64::MAX)); } } } @@ -431,10 +421,7 @@ where if log_enabled!(log::Level::Debug) { let hashes: Vec<_> = v.iter().map(|(hash, _value)| hash.clone()).collect(); - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' found on Dht.", hashes, - ); + debug!(target: LOG_TARGET, "Value for hash '{:?}' found on Dht.", hashes,); } if let Err(e) = self.handle_dht_value_found_event(v) { @@ -442,22 +429,16 @@ where metrics.handle_value_found_event_failure.inc(); } - debug!( - target: LOG_TARGET, - "Failed to handle Dht value found event: {:?}", e, - ); + debug!(target: LOG_TARGET, "Failed to handle Dht value found event: {:?}", e,); } - } + }, DhtEvent::ValueNotFound(hash) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); } if self.in_flight_lookups.remove(&hash).is_some() { - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' not found on Dht.", hash - ) + debug!(target: LOG_TARGET, "Value for hash '{:?}' not found on Dht.", hash) } else { debug!( target: LOG_TARGET, @@ -475,21 +456,15 @@ where metrics.dht_event_received.with_label_values(&["value_put"]).inc(); } - debug!( - target: LOG_TARGET, - "Successfully put hash '{:?}' on Dht.", hash, - ) + debug!(target: LOG_TARGET, 
"Successfully put hash '{:?}' on Dht.", hash,) }, DhtEvent::ValuePutFailed(hash) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } - debug!( - target: LOG_TARGET, - "Failed to put hash '{:?}' on Dht.", hash - ) - } + debug!(target: LOG_TARGET, "Failed to put hash '{:?}' on Dht.", hash) + }, } } @@ -498,34 +473,36 @@ where values: Vec<(libp2p::kad::record::Key, Vec)>, ) -> Result<()> { // Ensure `values` is not empty and all its keys equal. - let remote_key = values.iter().fold(Ok(None), |acc, (key, _)| { - match acc { + let remote_key = values + .iter() + .fold(Ok(None), |acc, (key, _)| match acc { Ok(None) => Ok(Some(key.clone())), - Ok(Some(ref prev_key)) if prev_key != key => Err( - Error::ReceivingDhtValueFoundEventWithDifferentKeys - ), + Ok(Some(ref prev_key)) if prev_key != key => + Err(Error::ReceivingDhtValueFoundEventWithDifferentKeys), x @ Ok(_) => x, Err(e) => Err(e), - } - })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; + })? + .ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; - let authority_id: AuthorityId = self.in_flight_lookups + let authority_id: AuthorityId = self + .in_flight_lookups .remove(&remote_key) .ok_or(Error::ReceivingUnexpectedRecord)?; let local_peer_id = self.network.local_peer_id(); - let remote_addresses: Vec = values.into_iter() + let remote_addresses: Vec = values + .into_iter() .map(|(_k, v)| { let schema::SignedAuthorityAddresses { signature, addresses } = schema::SignedAuthorityAddresses::decode(v.as_slice()) - .map_err(Error::DecodingProto)?; + .map_err(Error::DecodingProto)?; let signature = AuthoritySignature::decode(&mut &signature[..]) .map_err(Error::EncodingDecodingScale)?; if !AuthorityPair::verify(&signature, &addresses, &authority_id) { - return Err(Error::VerifyingDhtPayload); + return Err(Error::VerifyingDhtPayload) } let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) @@ -542,40 +519,41 @@ where .into_iter() .flatten() // Ignore [`Multiaddr`]s without [`PeerId`] and own addresses. - .filter(|addr| addr.iter().any(|protocol| { - // Parse to PeerId first as Multihashes of old and new PeerId - // representation don't equal. - // - // See https://github.com/libp2p/rust-libp2p/issues/555 for - // details. - if let multiaddr::Protocol::P2p(hash) = protocol { - let peer_id = match PeerId::from_multihash(hash) { - Ok(peer_id) => peer_id, - Err(_) => return false, // Discard address. - }; - - // Discard if equal to local peer id, keep if it differs. - return !(peer_id == local_peer_id); - } + .filter(|addr| { + addr.iter().any(|protocol| { + // Parse to PeerId first as Multihashes of old and new PeerId + // representation don't equal. + // + // See https://github.com/libp2p/rust-libp2p/issues/555 for + // details. + if let multiaddr::Protocol::P2p(hash) = protocol { + let peer_id = match PeerId::from_multihash(hash) { + Ok(peer_id) => peer_id, + Err(_) => return false, // Discard address. + }; + + // Discard if equal to local peer id, keep if it differs. + return !(peer_id == local_peer_id) + } - false // `protocol` is not a [`Protocol::P2p`], let's keep looking. - })) + false // `protocol` is not a [`Protocol::P2p`], let's keep looking. 
+ }) + }) .take(MAX_ADDRESSES_PER_AUTHORITY) .collect(); if !remote_addresses.is_empty() { self.addr_cache.insert(authority_id, remote_addresses); if let Some(metrics) = &self.metrics { - metrics.known_authorities_count.set( - self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX) - ); + metrics + .known_authorities_count + .set(self.addr_cache.num_ids().try_into().unwrap_or(std::u64::MAX)); } } Ok(()) } /// Retrieve our public keys within the current and next authority set. - // // A node might have multiple authority discovery keys within its keystore, e.g. an old one and // one for the upcoming session. In addition it could be participating in the current and (/ or) // next authority set with two keys. The function does not return all of the local authority @@ -591,14 +569,16 @@ where .collect::>(); let id = BlockId::hash(client.info().best_hash); - let authorities = client.runtime_api() + let authorities = client + .runtime_api() .authorities(&id) .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .map(std::convert::Into::into) .collect::>(); - let intersection = local_pub_keys.intersection(&authorities) + let intersection = local_pub_keys + .intersection(&authorities) .cloned() .map(std::convert::Into::into) .collect(); @@ -655,7 +635,7 @@ impl Metrics { publish: register( Counter::new( "authority_discovery_times_published_total", - "Number of times authority discovery has published external addresses." + "Number of times authority discovery has published external addresses.", )?, registry, )?, @@ -663,7 +643,7 @@ impl Metrics { Gauge::new( "authority_discovery_amount_external_addresses_last_published", "Number of external addresses published when authority discovery last \ - published addresses." + published addresses.", )?, registry, )?, @@ -671,14 +651,14 @@ impl Metrics { Counter::new( "authority_discovery_authority_addresses_requested_total", "Number of times authority discovery has requested external addresses of a \ - single authority." + single authority.", )?, registry, )?, requests_pending: register( Gauge::new( "authority_discovery_authority_address_requests_pending", - "Number of pending authority address requests." + "Number of pending authority address requests.", )?, registry, )?, @@ -686,7 +666,7 @@ impl Metrics { CounterVec::new( Opts::new( "authority_discovery_dht_event_received", - "Number of dht events received by authority discovery." + "Number of dht events received by authority discovery.", ), &["name"], )?, @@ -695,14 +675,14 @@ impl Metrics { handle_value_found_event_failure: register( Counter::new( "authority_discovery_handle_value_found_event_failure", - "Number of times handling a dht value found event failed." + "Number of times handling a dht value found event failed.", )?, registry, )?, known_authorities_count: register( Gauge::new( "authority_discovery_known_authorities_count", - "Number of authorities known by authority discovery." 
+ "Number of authorities known by authority discovery.", )?, registry, )?, diff --git a/substrate/client/authority-discovery/src/worker/addr_cache.rs b/substrate/client/authority-discovery/src/worker/addr_cache.rs index c9b0711803ba9f0c54a1d9797424499d8e618e78..3f9cee476d68ce9344cb8555d894bded3db4592c 100644 --- a/substrate/client/authority-discovery/src/worker/addr_cache.rs +++ b/substrate/client/authority-discovery/src/worker/addr_cache.rs @@ -19,8 +19,8 @@ use libp2p::core::multiaddr::{Multiaddr, Protocol}; use std::collections::HashMap; -use sp_authority_discovery::AuthorityId; use sc_network::PeerId; +use sp_authority_discovery::AuthorityId; /// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { @@ -45,27 +45,34 @@ impl AddrCache { addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); // Insert into `self.peer_id_to_authority_id`. - let peer_ids = addresses.iter() + let peer_ids = addresses + .iter() .map(|a| peer_id_from_multiaddr(a)) .filter_map(|peer_id| peer_id); for peer_id in peer_ids.clone() { - let former_auth = match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { - Some(a) if a != authority_id => a, - _ => continue, - }; + let former_auth = + match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { + Some(a) if a != authority_id => a, + _ => continue, + }; // PeerId was associated to a different authority id before. // Remove corresponding authority from `self.authority_id_to_addresses`. let former_auth_addrs = match self.authority_id_to_addresses.get_mut(&former_auth) { Some(a) => a, - None => { debug_assert!(false); continue } + None => { + debug_assert!(false); + continue + }, }; former_auth_addrs.retain(|a| peer_id_from_multiaddr(a).map_or(true, |p| p != peer_id)); } // Insert into `self.authority_id_to_addresses`. - for former_addr in - self.authority_id_to_addresses.insert(authority_id.clone(), addresses.clone()).unwrap_or_default() + for former_addr in self + .authority_id_to_addresses + .insert(authority_id.clone(), addresses.clone()) + .unwrap_or_default() { // Must remove from `self.peer_id_to_authority_id` any PeerId formerly associated // to that authority but that can't be found in its new addresses. @@ -87,7 +94,10 @@ impl AddrCache { } /// Returns the addresses for the given [`AuthorityId`]. - pub fn get_addresses_by_authority_id(&self, authority_id: &AuthorityId) -> Option<&Vec> { + pub fn get_addresses_by_authority_id( + &self, + authority_id: &AuthorityId, + ) -> Option<&Vec> { self.authority_id_to_addresses.get(&authority_id) } @@ -100,7 +110,9 @@ impl AddrCache { /// [`AuthorityId`]s. pub fn retain_ids(&mut self, authority_ids: &Vec) { // The below logic could be replaced by `BtreeMap::drain_filter` once it stabilized. - let authority_ids_to_remove = self.authority_id_to_addresses.iter() + let authority_ids_to_remove = self + .authority_id_to_addresses + .iter() .filter(|(id, _addresses)| !authority_ids.contains(id)) .map(|entry| entry.0) .cloned() @@ -111,7 +123,8 @@ impl AddrCache { let addresses = self.authority_id_to_addresses.remove(&authority_id_to_remove); // Remove other entries from `self.peer_id_to_authority_id`. 
- let peer_ids = addresses.iter() + let peer_ids = addresses + .iter() .flatten() .map(|a| peer_id_from_multiaddr(a)) .filter_map(|peer_id| peer_id); @@ -125,10 +138,12 @@ impl AddrCache { } fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { - addr.iter().last().and_then(|protocol| if let Protocol::P2p(multihash) = protocol { - PeerId::from_multihash(multihash).ok() - } else { - None + addr.iter().last().and_then(|protocol| { + if let Protocol::P2p(multihash) = protocol { + PeerId::from_multihash(multihash).ok() + } else { + None + } }) } @@ -159,9 +174,11 @@ mod tests { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap() - ).unwrap(); - let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), + ) + .unwrap(); + let multiaddr = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() .unwrap() .with(Protocol::P2p(peer_id.into())); @@ -176,12 +193,15 @@ mod tests { fn arbitrary(g: &mut Gen) -> Self { let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); let peer_id = PeerId::from_multihash( - Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap() - ).unwrap(); - let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap(), + ) + .unwrap(); + let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333" + .parse::() .unwrap() .with(Protocol::P2p(peer_id.clone().into())); - let multiaddr2 = "/ip6/2002:db8:0:0:0:0:0:2/tcp/30133".parse::() + let multiaddr2 = "/ip6/2002:db8:0:0:0:0:0:2/tcp/30133" + .parse::() .unwrap() .with(Protocol::P2p(peer_id.into())); TestMultiaddrsSamePeerCombo(multiaddr1, multiaddr2) @@ -219,11 +239,13 @@ mod tests { cache.retain_ids(&vec![first.0, second.0]); assert_eq!( - None, cache.get_addresses_by_authority_id(&third.0), + None, + cache.get_addresses_by_authority_id(&third.0), "Expect `get_addresses_by_authority_id` to not return `None` for third authority." ); assert_eq!( - None, cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), + None, + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&third.1).unwrap()), "Expect `get_authority_id_by_peer_id` to return `None` for third authority." 
); @@ -253,7 +275,10 @@ mod tests { let mut cache = AddrCache::new(); cache.insert(authority1.clone(), vec![multiaddr1.clone()]); - cache.insert(authority1.clone(), vec![multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()]); + cache.insert( + authority1.clone(), + vec![multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()], + ); assert_eq!( None, diff --git a/substrate/client/authority-discovery/src/worker/tests.rs b/substrate/client/authority-discovery/src/worker/tests.rs index 8be23e4840bde21db4643a526241cf12e7167a6b..b2f6ff544cb09ac84bbc22dfcac03ddaf7ed44c2 100644 --- a/substrate/client/authority-discovery/src/worker/tests.rs +++ b/substrate/client/authority-discovery/src/worker/tests.rs @@ -18,21 +18,26 @@ use crate::worker::schema; -use std::{sync::{Arc, Mutex}, task::Poll}; +use std::{ + sync::{Arc, Mutex}, + task::Poll, +}; use async_trait::async_trait; -use futures::channel::mpsc::{self, channel}; -use futures::executor::{block_on, LocalPool}; -use futures::future::FutureExt; -use futures::sink::SinkExt; -use futures::task::LocalSpawn; -use libp2p::{kad, core::multiaddr, PeerId}; +use futures::{ + channel::mpsc::{self, channel}, + executor::{block_on, LocalPool}, + future::FutureExt, + sink::SinkExt, + task::LocalSpawn, +}; +use libp2p::{core::multiaddr, kad, PeerId}; use prometheus_endpoint::prometheus::default_registry; -use sp_api::{ProvideRuntimeApi, ApiRef}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_core::crypto::Public; use sp_keystore::{testing::KeyStore, CryptoStore}; -use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use substrate_test_runtime_client::runtime::Block; use super::*; @@ -46,9 +51,7 @@ impl ProvideRuntimeApi for TestApi { type Api = RuntimeApi; fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { - authorities: self.authorities.clone(), - }.into() + RuntimeApi { authorities: self.authorities.clone() }.into() } } @@ -135,10 +138,7 @@ impl Default for TestNetwork { let (tx, rx) = mpsc::unbounded(); TestNetwork { peer_id: PeerId::random(), - external_addresses: vec![ - "/ip6/2001:db8::/tcp/30333" - .parse().unwrap(), - ], + external_addresses: vec!["/ip6/2001:db8::/tcp/30333".parse().unwrap()], put_value_call: Default::default(), get_value_call: Default::default(), event_sender: tx, @@ -151,11 +151,17 @@ impl Default for TestNetwork { impl NetworkProvider for TestNetwork { fn put_value(&self, key: kad::record::Key, value: Vec) { self.put_value_call.lock().unwrap().push((key.clone(), value.clone())); - self.event_sender.clone().unbounded_send(TestNetworkEvent::PutCalled(key, value)).unwrap(); + self.event_sender + .clone() + .unbounded_send(TestNetworkEvent::PutCalled(key, value)) + .unwrap(); } fn get_value(&self, key: &kad::record::Key) { self.get_value_call.lock().unwrap().push(key.clone()); - self.event_sender.clone().unbounded_send(TestNetworkEvent::GetCalled(key.clone())).unwrap(); + self.event_sender + .clone() + .unbounded_send(TestNetworkEvent::GetCalled(key.clone())) + .unwrap(); } } @@ -175,9 +181,8 @@ async fn build_dht_event( key_store: &KeyStore, ) -> (libp2p::kad::record::Key, Vec) { let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { - addresses: addresses.into_iter().map(|a| a.to_vec()).collect() - }.encode(&mut serialized_addresses) + schema::AuthorityAddresses { addresses: addresses.into_iter().map(|a| a.to_vec()).collect() } + .encode(&mut serialized_addresses) .map_err(Error::EncodingProto) .unwrap(); @@ -192,11 +197,9 
@@ async fn build_dht_event( .unwrap(); let mut signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature, - } - .encode(&mut signed_addresses).unwrap(); + schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), signature } + .encode(&mut signed_addresses) + .unwrap(); let key = hash_authority_id(&public_key.to_raw_vec()); let value = signed_addresses; @@ -208,9 +211,7 @@ fn new_registers_metrics() { let (_dht_event_tx, dht_event_rx) = mpsc::channel(1000); let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let registry = prometheus_endpoint::Registry::new(); @@ -275,65 +276,67 @@ fn publish_discover_cycle() { let key_store = KeyStore::new(); - let _ = pool.spawner().spawn_local_obj(async move { - let node_a_public = key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .await - .unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![node_a_public.into()], - }); - - let (_to_worker, from_service) = mpsc::channel(0); - let mut worker = Worker::new( - from_service, - test_api, - network.clone(), - Box::pin(dht_event_rx), - Role::PublishAndDiscover(key_store.into()), - None, - Default::default(), - ); - - worker.publish_ext_addresses(false).await.unwrap(); - - // Expect authority discovery to put a new record onto the dht. - assert_eq!(network.put_value_call.lock().unwrap().len(), 1); - - let dht_event = { - let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); - sc_network::DhtEvent::ValueFound(vec![(key, value)]) - }; - - // Node B discovering node A's address. - - let (mut dht_event_tx, dht_event_rx) = channel(1000); - let test_api = Arc::new(TestApi { - // Make sure node B identifies node A as an authority. - authorities: vec![node_a_public.into()], - }); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - - let (_to_worker, from_service) = mpsc::channel(0); - let mut worker = Worker::new( - from_service, - test_api, - network.clone(), - Box::pin(dht_event_rx), - Role::PublishAndDiscover(key_store.into()), - None, - Default::default(), - ); - - dht_event_tx.try_send(dht_event.clone()).unwrap(); - - worker.refill_pending_lookups_queue().await.unwrap(); - worker.start_new_lookups(); - - // Make authority discovery handle the event. - worker.handle_dht_event(dht_event).await; - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + let node_a_public = key_store + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .await + .unwrap(); + let test_api = Arc::new(TestApi { authorities: vec![node_a_public.into()] }); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + Box::pin(dht_event_rx), + Role::PublishAndDiscover(key_store.into()), + None, + Default::default(), + ); + + worker.publish_ext_addresses(false).await.unwrap(); + + // Expect authority discovery to put a new record onto the dht. + assert_eq!(network.put_value_call.lock().unwrap().len(), 1); + + let dht_event = { + let (key, value) = network.put_value_call.lock().unwrap().pop().unwrap(); + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + + // Node B discovering node A's address. 
+ + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let test_api = Arc::new(TestApi { + // Make sure node B identifies node A as an authority. + authorities: vec![node_a_public.into()], + }); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + + let (_to_worker, from_service) = mpsc::channel(0); + let mut worker = Worker::new( + from_service, + test_api, + network.clone(), + Box::pin(dht_event_rx), + Role::PublishAndDiscover(key_store.into()), + None, + Default::default(), + ); + + dht_event_tx.try_send(dht_event.clone()).unwrap(); + + worker.refill_pending_lookups_queue().await.unwrap(); + worker.start_new_lookups(); + + // Make authority discovery handle the event. + worker.handle_dht_event(dht_event).await; + } + .boxed_local() + .into(), + ); pool.run(); } @@ -345,9 +348,7 @@ fn terminate_when_event_stream_terminates() { let (dht_event_tx, dht_event_rx) = channel(1000); let network: Arc = Arc::new(Default::default()); let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); + let test_api = Arc::new(TestApi { authorities: vec![] }); let (to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( @@ -358,7 +359,8 @@ fn terminate_when_event_stream_terminates() { Role::PublishAndDiscover(key_store.into()), None, Default::default(), - ).run(); + ) + .run(); futures::pin_mut!(worker); block_on(async { @@ -367,7 +369,8 @@ fn terminate_when_event_stream_terminates() { // Drop sender side of service channel. drop(to_worker); assert_eq!( - Poll::Pending, futures::poll!(&mut worker), + Poll::Pending, + futures::poll!(&mut worker), "Expect the authority discovery module not to terminate once the \ sender side of the service channel is closed.", ); @@ -377,7 +380,8 @@ fn terminate_when_event_stream_terminates() { drop(dht_event_tx); assert_eq!( - Poll::Ready(()), futures::poll!(&mut worker), + Poll::Ready(()), + futures::poll!(&mut worker), "Expect the authority discovery module to terminate once the \ sending side of the dht event channel is closed.", ); @@ -390,14 +394,13 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) + address.with(multiaddr::Protocol::P2p(peer_id.into())) }; let remote_key_store = KeyStore::new(); - let remote_public_key: AuthorityId = block_on( - remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None), - ).unwrap().into(); + let remote_public_key: AuthorityId = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap() + .into(); let (mut dht_event_tx, dht_event_rx) = channel(1); let (network, mut network_events) = { @@ -407,9 +410,7 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { }; let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![remote_public_key.clone()], - }); + let test_api = Arc::new(TestApi { authorities: vec![remote_public_key.clone()] }); let mut pool = LocalPool::new(); let (mut to_worker, from_service) = mpsc::channel(1); @@ -427,30 +428,35 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { // // As this is a local pool, only one future at a time will have the CPU and // can make progress until the future returns `Pending`. - let _ = pool.spawner().spawn_local_obj(async move { - // Refilling `pending_lookups` only happens every X minutes. 
Fast - // forward by calling `refill_pending_lookups_queue` directly. - worker.refill_pending_lookups_queue().await.unwrap(); - worker.run().await - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + // Refilling `pending_lookups` only happens every X minutes. Fast + // forward by calling `refill_pending_lookups_queue` directly. + worker.refill_pending_lookups_queue().await.unwrap(); + worker.run().await + } + .boxed_local() + .into(), + ); pool.run_until(async { // Assert worker to trigger a lookup for the one and only authority. - assert!(matches!( - network_events.next().await, - Some(TestNetworkEvent::GetCalled(_)) - )); + assert!(matches!(network_events.next().await, Some(TestNetworkEvent::GetCalled(_)))); // Send an event that should generate an error - dht_event_tx.send(DhtEvent::ValueFound(Default::default())).await + dht_event_tx + .send(DhtEvent::ValueFound(Default::default())) + .await .expect("Channel has capacity of 1."); // Make previously triggered lookup succeed. let dht_event = { let (key, value) = build_dht_event( vec![remote_multiaddr.clone()], - remote_public_key.clone(), &remote_key_store, - ).await; + remote_public_key.clone(), + &remote_key_store, + ) + .await; sc_network::DhtEvent::ValueFound(vec![(key, value)]) }; dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); @@ -458,10 +464,10 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { // Expect authority discovery to function normally, now knowing the // address for the remote node. let (sender, addresses) = futures::channel::oneshot::channel(); - to_worker.send(ServicetoWorkerMsg::GetAddressesByAuthorityId( - remote_public_key, - sender, - )).await.expect("Channel has capacity of 1."); + to_worker + .send(ServicetoWorkerMsg::GetAddressesByAuthorityId(remote_public_key, sender)) + .await + .expect("Channel has capacity of 1."); assert_eq!(Some(vec![remote_multiaddr]), addresses.await.unwrap()); }); } @@ -469,23 +475,19 @@ fn dont_stop_polling_dht_event_stream_after_bogus_event() { #[test] fn limit_number_of_addresses_added_to_cache_per_authority() { let remote_key_store = KeyStore::new(); - let remote_public = block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); + let remote_public = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap(); - let addresses = (0..100).map(|_| { - let peer_id = PeerId::random(); - let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) - }).collect(); + let addresses = (0..100) + .map(|_| { + let peer_id = PeerId::random(); + let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + address.with(multiaddr::Protocol::P2p(peer_id.into())) + }) + .collect(); - let dht_event = block_on(build_dht_event( - addresses, - remote_public.into(), - &remote_key_store, - )); + let dht_event = block_on(build_dht_event(addresses, remote_public.into(), &remote_key_store)); let (_dht_event_tx, dht_event_rx) = channel(1); @@ -506,16 +508,20 @@ fn limit_number_of_addresses_added_to_cache_per_authority() { worker.handle_dht_value_found_event(vec![dht_event]).unwrap(); assert_eq!( MAX_ADDRESSES_PER_AUTHORITY, - worker.addr_cache.get_addresses_by_authority_id(&remote_public.into()).unwrap().len(), + worker + .addr_cache + .get_addresses_by_authority_id(&remote_public.into()) + .unwrap() + .len(), ); } #[test] fn 
do_not_cache_addresses_without_peer_id() { let remote_key_store = KeyStore::new(); - let remote_public = block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap(); + let remote_public = + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap(); let multiaddr_with_peer_id = { let peer_id = PeerId::random(); @@ -524,21 +530,17 @@ fn do_not_cache_addresses_without_peer_id() { address.with(multiaddr::Protocol::P2p(peer_id.into())) }; - let multiaddr_without_peer_id: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); + let multiaddr_without_peer_id: Multiaddr = + "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); let dht_event = block_on(build_dht_event( - vec![ - multiaddr_with_peer_id.clone(), - multiaddr_without_peer_id, - ], + vec![multiaddr_with_peer_id.clone(), multiaddr_without_peer_id], remote_public.into(), &remote_key_store, )); let (_dht_event_tx, dht_event_rx) = channel(1); - let local_test_api = Arc::new(TestApi { - authorities: vec![remote_public.into()], - }); + let local_test_api = Arc::new(TestApi { authorities: vec![remote_public.into()] }); let local_network: Arc = Arc::new(Default::default()); let local_key_store = KeyStore::new(); @@ -578,9 +580,7 @@ fn addresses_to_publish_adds_p2p() { let (_to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( from_service, - Arc::new(TestApi { - authorities: vec![], - }), + Arc::new(TestApi { authorities: vec![] }), network.clone(), Box::pin(dht_event_rx), Role::PublishAndDiscover(Arc::new(KeyStore::new())), @@ -605,17 +605,16 @@ fn addresses_to_publish_respects_existing_p2p_protocol() { let network: Arc = Arc::new(TestNetwork { external_addresses: vec![ "/ip6/2001:db8::/tcp/30333/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC" - .parse().unwrap(), + .parse() + .unwrap(), ], - .. 
Default::default() + ..Default::default() }); let (_to_worker, from_service) = mpsc::channel(0); let worker = Worker::new( from_service, - Arc::new(TestApi { - authorities: vec![], - }), + Arc::new(TestApi { authorities: vec![] }), network.clone(), Box::pin(dht_event_rx), Role::PublishAndDiscover(Arc::new(KeyStore::new())), @@ -624,7 +623,8 @@ fn addresses_to_publish_respects_existing_p2p_protocol() { ); assert_eq!( - network.external_addresses, worker.addresses_to_publish().collect::>(), + network.external_addresses, + worker.addresses_to_publish().collect::>(), "Expected Multiaddr from `TestNetwork` to not be altered.", ); } @@ -635,21 +635,21 @@ fn lookup_throttling() { let peer_id = PeerId::random(); let address: Multiaddr = "/ip6/2001:db8:0:0:0:0:0:1/tcp/30333".parse().unwrap(); - address.with(multiaddr::Protocol::P2p( - peer_id.into(), - )) + address.with(multiaddr::Protocol::P2p(peer_id.into())) }; let remote_key_store = KeyStore::new(); - let remote_public_keys: Vec = (0..20).map(|_| { - block_on(remote_key_store - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) - .unwrap().into() - }).collect(); - let remote_hash_to_key = remote_public_keys.iter() + let remote_public_keys: Vec = (0..20) + .map(|_| { + block_on(remote_key_store.sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None)) + .unwrap() + .into() + }) + .collect(); + let remote_hash_to_key = remote_public_keys + .iter() .map(|k| (hash_authority_id(k.as_ref()), k.clone())) .collect::>(); - let (mut dht_event_tx, dht_event_rx) = channel(1); let (_to_worker, from_service) = mpsc::channel(0); let mut network = TestNetwork::default(); @@ -668,56 +668,61 @@ fn lookup_throttling() { let mut pool = LocalPool::new(); let metrics = worker.metrics.clone().unwrap(); - let _ = pool.spawner().spawn_local_obj(async move { - // Refilling `pending_lookups` only happens every X minutes. Fast - // forward by calling `refill_pending_lookups_queue` directly. - worker.refill_pending_lookups_queue().await.unwrap(); - worker.run().await - }.boxed_local().into()); + let _ = pool.spawner().spawn_local_obj( + async move { + // Refilling `pending_lookups` only happens every X minutes. Fast + // forward by calling `refill_pending_lookups_queue` directly. + worker.refill_pending_lookups_queue().await.unwrap(); + worker.run().await + } + .boxed_local() + .into(), + ); - pool.run_until(async { - // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. - for _ in 0..MAX_IN_FLIGHT_LOOKUPS { + pool.run_until( + async { + // Assert worker to trigger MAX_IN_FLIGHT_LOOKUPS lookups. + for _ in 0..MAX_IN_FLIGHT_LOOKUPS { + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + } + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make first lookup succeed. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let remote_key: AuthorityId = remote_hash_to_key.get(&remote_hash).unwrap().clone(); + let dht_event = { + let (key, value) = + build_dht_event(vec![remote_multiaddr.clone()], remote_key, &remote_key_store) + .await; + sc_network::DhtEvent::ValueFound(vec![(key, value)]) + }; + dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. 
assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); + + // Make second one fail. + let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); + let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); + dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); + + // Assert worker to trigger another lookup. + assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); + assert_eq!( + metrics.requests_pending.get(), + (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 + ); + assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); } - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - - // Make first lookup succeed. - let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let remote_key: AuthorityId = remote_hash_to_key.get(&remote_hash).unwrap().clone(); - let dht_event = { - let (key, value) = build_dht_event( - vec![remote_multiaddr.clone()], - remote_key, - &remote_key_store - ).await; - sc_network::DhtEvent::ValueFound(vec![(key, value)]) - }; - dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); - - // Assert worker to trigger another lookup. - assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 1) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - - // Make second one fail. - let remote_hash = network.get_value_call.lock().unwrap().pop().unwrap(); - let dht_event = sc_network::DhtEvent::ValueNotFound(remote_hash); - dht_event_tx.send(dht_event).await.expect("Channel has capacity of 1."); - - // Assert worker to trigger another lookup. 
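This test pins down the worker's throttling invariant: at most MAX_IN_FLIGHT_LOOKUPS DHT queries are ever outstanding, and every response, ValueFound and ValueNotFound alike, frees exactly one slot that is refilled from the pending queue, which is why `requests_pending` drops by one per answered lookup while `get_value_call` stays pegged at the window size. A minimal sketch of that window pattern, using only std types (the struct and its fields are illustrative stand-ins, not the worker's actual internals):

    use std::collections::VecDeque;

    const MAX_IN_FLIGHT_LOOKUPS: usize = 8;

    struct LookupWindow {
        pending: VecDeque<u32>, // lookups waiting for a free slot
        in_flight: usize,       // lookups currently out on the network
    }

    impl LookupWindow {
        // Start as many pending lookups as the in-flight window allows.
        fn refill(&mut self, mut start: impl FnMut(u32)) {
            while self.in_flight < MAX_IN_FLIGHT_LOOKUPS {
                match self.pending.pop_front() {
                    Some(lookup) => {
                        self.in_flight += 1;
                        start(lookup);
                    },
                    None => break,
                }
            }
        }

        // Any response, success or failure, frees one slot and triggers a refill.
        fn on_response(&mut self, start: impl FnMut(u32)) {
            self.in_flight -= 1;
            self.refill(start);
        }
    }

    fn main() {
        let mut w = LookupWindow { pending: (0..20).collect(), in_flight: 0 };
        w.refill(|id| println!("lookup {} started", id));
        assert_eq!(w.in_flight, MAX_IN_FLIGHT_LOOKUPS);
        w.on_response(|id| println!("lookup {} started", id));
        assert_eq!(w.in_flight, MAX_IN_FLIGHT_LOOKUPS); // window is topped back up
    }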
- assert!(matches!(receiver.next().await, Some(TestNetworkEvent::GetCalled(_)))); - assert_eq!( - metrics.requests_pending.get(), - (remote_public_keys.len() - MAX_IN_FLIGHT_LOOKUPS - 2) as u64 - ); - assert_eq!(network.get_value_call.lock().unwrap().len(), MAX_IN_FLIGHT_LOOKUPS); - }.boxed_local()); + .boxed_local(), + ); } diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 590f4275bf760fddaeb3fc8c5286a6f0db89535d..b60606294890434579bdeaf4bfc21d67fe0df64b 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -20,24 +20,30 @@ // FIXME #1021 move this into sp-consensus -use std::{pin::Pin, time, sync::Arc}; -use sc_client_api::backend; use codec::{Decode, Encode}; -use sp_consensus::{evaluation, Proposal, ProofRecording, DisableProofRecording, EnableProofRecording}; +use futures::{ + channel::oneshot, + future, + future::{Future, FutureExt}, + select, +}; +use log::{debug, error, info, trace, warn}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_client_api::backend; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; +use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; +use sp_consensus::{ + evaluation, DisableProofRecording, EnableProofRecording, ProofRecording, Proposal, +}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; -use log::{error, info, debug, trace, warn}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, + traits::{BlakeTwo256, Block as BlockT, DigestFor, Hash as HashT, Header as HeaderT}, }; -use sc_transaction_pool_api::{TransactionPool, InPoolTransaction}; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use futures::{future, future::{Future, FutureExt}, channel::oneshot, select}; -use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed}; -use std::marker::PhantomData; +use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_proposer_metrics::MetricsLink as PrometheusMetrics; @@ -141,14 +147,18 @@ impl ProposerFactory { } impl ProposerFactory - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, { fn init_with_now( &mut self, @@ -180,26 +190,26 @@ impl ProposerFactory } } -impl sp_consensus::Environment for - ProposerFactory - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, - PR: ProofRecording, +impl sp_consensus::Environment for ProposerFactory +where + A: TransactionPool + 'static, + B: backend::Backend + Send + 
Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, + PR: ProofRecording, { type CreateProposer = future::Ready>; type Proposer = Proposer; type Error = sp_blockchain::Error; - fn init( - &mut self, - parent_header: &::Header, - ) -> Self::CreateProposer { + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now)))) } } @@ -220,22 +230,28 @@ pub struct Proposer { _phantom: PhantomData<(B, PR)>, } -impl sp_consensus::Proposer for - Proposer - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, - PR: ProofRecording, +impl sp_consensus::Proposer for Proposer +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, + PR: ProofRecording, { type Transaction = backend::TransactionFor; - type Proposal = Pin, Self::Error> - > + Send>>; + type Proposal = Pin< + Box< + dyn Future, Self::Error>> + + Send, + >, + >; type Error = sp_blockchain::Error; type ProofRecording = PR; type Proof = PR::Proof; @@ -250,36 +266,38 @@ impl sp_consensus::Proposer for let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); - spawn_handle.spawn_blocking("basic-authorship-proposer", Box::pin(async move { - // leave some time for evaluation and block finalization (33%) - let deadline = (self.now)() + max_duration - max_duration / 3; - let res = self.propose_with( - inherent_data, - inherent_digests, - deadline, - block_size_limit, - ).await; - if tx.send(res).is_err() { - trace!("Could not send block production result to proposer!"); - } - })); + spawn_handle.spawn_blocking( + "basic-authorship-proposer", + Box::pin(async move { + // leave some time for evaluation and block finalization (33%) + let deadline = (self.now)() + max_duration - max_duration / 3; + let res = self + .propose_with(inherent_data, inherent_digests, deadline, block_size_limit) + .await; + if tx.send(res).is_err() { + trace!("Could not send block production result to proposer!"); + } + }), + ); - async move { - rx.await? - }.boxed() + async move { rx.await? }.boxed() } } impl Proposer - where - A: TransactionPool, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, - PR: ProofRecording, +where + A: TransactionPool, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: + ApiExt> + BlockBuilderApi, + PR: ProofRecording, { async fn propose_with( self, @@ -287,30 +305,30 @@ impl Proposer inherent_digests: DigestFor, deadline: time::Instant, block_size_limit: Option, - ) -> Result, PR::Proof>, sp_blockchain::Error> { + ) -> Result, PR::Proof>, sp_blockchain::Error> + { /// If the block is full we will attempt to push at most /// this number of transactions before quitting for real. /// It allows us to increase block utilization. 
const MAX_SKIPPED_TRANSACTIONS: usize = 8; - let mut block_builder = self.client.new_block_at( - &self.parent_id, - inherent_digests, - PR::ENABLED, - )?; + let mut block_builder = + self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?; for inherent in block_builder.create_inherents(inherent_data)? { match block_builder.push(inherent) { Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => warn!("⚠️ Dropping non-mandatory inherent from overweight block."), Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { - error!("❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."); + error!( + "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced." + ); Err(ApplyExtrinsicFailed(Validity(e)))? - } + }, Err(e) => { warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); - } - Ok(_) => {} + }, + Ok(_) => {}, } } @@ -320,9 +338,8 @@ impl Proposer let mut unqueue_invalid = Vec::new(); let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse(); - let mut t2 = futures_timer::Delay::new( - deadline.saturating_duration_since((self.now)()) / 8, - ).fuse(); + let mut t2 = + futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); let pending_iterator = select! { res = t1 => res, @@ -349,15 +366,14 @@ impl Proposer "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." ); - break; + break } let pending_tx_data = pending_tx.data().clone(); let pending_tx_hash = pending_tx.hash().clone(); - let block_size = block_builder.estimate_block_size( - self.include_proof_in_block_size_estimation, - ); + let block_size = + block_builder.estimate_block_size(self.include_proof_in_block_size_estimation); if block_size + pending_tx_data.encoded_size() > block_size_limit { if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; @@ -366,11 +382,11 @@ impl Proposer but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); - continue; + continue } else { debug!("Reached block size limit, proceeding with proposing."); hit_block_size_limit = true; - break; + break } } @@ -379,9 +395,8 @@ impl Proposer Ok(()) => { transaction_pushed = true; debug!("[{:?}] Pushed to the block.", pending_tx_hash); - } - Err(ApplyExtrinsicFailed(Validity(e))) - if e.exhausted_resources() => { + }, + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -390,20 +405,20 @@ impl Proposer ); } else { debug!("Block is full, proceed with proposing."); - break; + break } - } + }, Err(e) if skipped > 0 => { trace!( "[{:?}] Ignoring invalid transaction when skipping: {}", pending_tx_hash, e ); - } + }, Err(e) => { debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); unqueue_invalid.push(pending_tx_hash); - } + }, } } @@ -418,12 +433,10 @@ impl Proposer let (block, storage_changes, proof) = block_builder.build()?.into_inner(); - self.metrics.report( - |metrics| { - metrics.number_of_transactions.set(block.extrinsics().len() as u64); - metrics.block_constructed.observe(block_timer.elapsed().as_secs_f64()); - } - ); + self.metrics.report(|metrics| { + metrics.number_of_transactions.set(block.extrinsics().len() as u64); + metrics.block_constructed.observe(block_timer.elapsed().as_secs_f64()); + }); info!( "🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", @@ -449,16 +462,14 @@ impl Proposer error!("Failed to verify 
block encoding/decoding"); } - if let Err(err) = evaluation::evaluate_initial( - &block, - &self.parent_hash, - self.parent_number, - ) { + if let Err(err) = + evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) + { error!("Failed to evaluate authored block: {:?}", err); } - let proof = PR::into_proof(proof) - .map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; + let proof = + PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; Ok(Proposal { block, proof, storage_changes }) } } @@ -467,19 +478,20 @@ impl Proposer mod tests { use super::*; + use futures::executor::block_on; use parking_lot::Mutex; - use sp_consensus::{BlockOrigin, Proposer}; - use substrate_test_runtime_client::{ - prelude::*, TestClientBuilder, runtime::{Extrinsic, Transfer}, TestClientBuilderExt, - }; - use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource}; + use sc_client_api::Backend; use sc_transaction_pool::BasicPool; + use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool, TransactionSource}; use sp_api::Core; use sp_blockchain::HeaderBackend; + use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_runtime::traits::NumberFor; - use sc_client_api::Backend; - use futures::executor::block_on; - use sp_consensus::Environment; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + TestClientBuilder, TestClientBuilderExt, + }; const SOURCE: TransactionSource = TransactionSource::External; @@ -489,16 +501,15 @@ mod tests { nonce, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx() + } + .into_signed_tx() } fn chain_event(header: B::Header) -> ChainEvent - where NumberFor: From + where + NumberFor: From, { - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None } } #[test] @@ -514,25 +525,20 @@ mod tests { client.clone(), ); - block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) - ).unwrap(); + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)])) + .unwrap(); block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( @@ -541,20 +547,21 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let old = value.1; let new = old + time::Duration::from_secs(2); *value = (true, new); old - }) + }), ); // when let deadline = time::Duration::from_secs(3); - let block = block_on( - proposer.propose(Default::default(), Default::default(), deadline, None) - ).map(|r| r.block).unwrap(); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. 
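The `cell`-based clock these tests install through `init_with_now` exists to step the proposer across its deadline arithmetic, which `propose_with` computes as shown earlier in this diff: a third of `max_duration` is reserved for evaluation and block finalization, and the first wait on the pool's `ready_at` future is raced against an eighth of what remains. A self-contained sketch of just that arithmetic, with plain `std::time` and the same 3-second deadline the test passes:

    use std::time::{Duration, Instant};

    fn main() {
        let max_duration = Duration::from_secs(3);
        let now = Instant::now();

        // leave some time for evaluation and block finalization (33%)
        let deadline = now + max_duration - max_duration / 3;
        assert_eq!(deadline - now, Duration::from_secs(2));

        // the initial wait for ready transactions is capped at an eighth of the rest
        let pool_timeout = deadline.saturating_duration_since(now) / 8;
        assert_eq!(pool_timeout, Duration::from_millis(250));
    }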
@@ -574,13 +581,8 @@ mod tests { client.clone(), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let cell = Mutex::new((false, time::Instant::now())); let proposer = proposer_factory.init_with_now( @@ -589,18 +591,18 @@ mod tests { let mut value = cell.lock(); if !value.0 { value.0 = true; - return value.1; + return value.1 } let new = value.1 + time::Duration::from_secs(160); *value = (true, new); new - }) + }), ); let deadline = time::Duration::from_secs(1); - block_on( - proposer.propose(Default::default(), Default::default(), deadline, None) - ).map(|r| r.block).unwrap(); + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); } #[test] @@ -619,25 +621,19 @@ mod tests { let genesis_hash = client.info().best_hash; let block_id = BlockId::Hash(genesis_hash); - block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), - ).unwrap(); + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)])).unwrap(); block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") .expect("there should be header"), - )) + )), ); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = proposer_factory.init_with_now( &client.header(&block_id).unwrap().unwrap(), @@ -645,9 +641,9 @@ mod tests { ); let deadline = time::Duration::from_secs(9); - let proposal = block_on( - proposer.propose(Default::default(), Default::default(), deadline, None), - ).unwrap(); + let proposal = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .unwrap(); assert_eq!(proposal.block.extrinsics().len(), 1); @@ -655,16 +651,13 @@ mod tests { api.execute_block(&block_id, proposal.block).unwrap(); let state = backend.state_at(block_id).unwrap(); - let changes_trie_state = backend::changes_tries_state_at_block( - &block_id, - backend.changes_trie_storage(), - ).unwrap(); + let changes_trie_state = + backend::changes_tries_state_at_block(&block_id, backend.changes_trie_storage()) + .unwrap(); - let storage_changes = api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - genesis_hash, - ).unwrap(); + let storage_changes = api + .into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash) + .unwrap(); assert_eq!( proposal.storage_changes.transaction_storage_root, @@ -685,8 +678,10 @@ mod tests { client.clone(), ); - block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![ + block_on(txpool.submit_at( + &BlockId::number(0), + SOURCE, + vec![ extrinsic(0), extrinsic(1), Transfer { @@ -704,22 +699,16 @@ mod tests { }.into_resources_exhausting_tx(), extrinsic(5), extrinsic(6), - ]) - ).unwrap(); - - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); - let mut propose_block = | - client: &TestClient, - number, - expected_block_extrinsics, - expected_pool_transactions, - | { + ], + )) + .unwrap(); + + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); + let mut propose_block 
= |client: &TestClient, + number, + expected_block_extrinsics, + expected_pool_transactions| { let proposer = proposer_factory.init_with_now( &client.header(&BlockId::number(number)).unwrap().unwrap(), Box::new(move || time::Instant::now()), @@ -727,9 +716,10 @@ mod tests { // when let deadline = time::Duration::from_secs(9); - let block = block_on( - proposer.propose(Default::default(), Default::default(), deadline, None) - ).map(|r| r.block).unwrap(); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); // then // block should have some extrinsics although we have some more in the pool. @@ -741,10 +731,11 @@ mod tests { block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(0u64)) + client + .header(&BlockId::Number(0u64)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); // let's create one block and import it @@ -753,10 +744,11 @@ mod tests { block_on( txpool.maintain(chain_event( - client.header(&BlockId::Number(1)) + client + .header(&BlockId::Number(1)) .expect("header get error") - .expect("there should be header") - )) + .expect("there should be header"), + )), ); // now let's make sure that we can still make some progress @@ -775,7 +767,8 @@ mod tests { spawner.clone(), client.clone(), ); - let genesis_header = client.header(&BlockId::Number(0u64)) + let genesis_header = client + .header(&BlockId::Number(0u64)) .expect("header get error") .expect("there should be header"); @@ -784,40 +777,43 @@ mod tests { .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) .collect::>(); - let block_limit = genesis_header.encoded_size() - + extrinsics.iter().take(extrinsics_num - 1).map(Encode::encoded_size).sum::() - + Vec::::new().encoded_size(); + let block_limit = genesis_header.encoded_size() + + extrinsics + .iter() + .take(extrinsics_num - 1) + .map(Encode::encoded_size) + .sum::() + + Vec::::new().encoded_size(); - block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics) - ).unwrap(); + block_on(txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics)).unwrap(); block_on(txpool.maintain(chain_event(genesis_header.clone()))); - let mut proposer_factory = ProposerFactory::new( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); // Give it enough time let deadline = time::Duration::from_secs(300); - let block = block_on( - proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit)) - ).map(|r| r.block).unwrap(); + let block = block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + Some(block_limit), + )) + .map(|r| r.block) + .unwrap(); // Based on the block limit, one transaction shouldn't be included. 
assert_eq!(block.extrinsics().len(), extrinsics_num - 1); let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); - let block = block_on( - proposer.propose(Default::default(), Default::default(), deadline, None, - )).map(|r| r.block).unwrap(); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); // Without a block limit we should include all of them assert_eq!(block.extrinsics().len(), extrinsics_num); @@ -833,9 +829,14 @@ mod tests { let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); // Give it enough time - let block = block_on( - proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit)) - ).map(|r| r.block).unwrap(); + let block = block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + Some(block_limit), + )) + .map(|r| r.block) + .unwrap(); // The block limit didn't change, but we now include the proof in the estimation of the // block size and thus, one less transaction should fit into the limit. diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index 133b833cdddc82cd55638af105da3bc861adaf86..2b2fe554efdff5a486a1652825463000c36d1730 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -41,12 +41,12 @@ //! # ); //! // The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory::new( -//! spawner, -//! client.clone(), -//! txpool.clone(), -//! None, -//! None, -//! ); +//! spawner, +//! client.clone(), +//! txpool.clone(), +//! None, +//! None, +//! ); //! //! // From this factory, we create a `Proposer`. //! let proposer = proposer_factory.init( @@ -69,8 +69,7 @@ //! let block = futures::executor::block_on(future).unwrap(); //! println!("Generated block: {:?}", block.block); //! ``` -//! mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_BLOCK_SIZE_LIMIT}; +pub use crate::basic_authorship::{Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT}; diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 7d391f8fb85b3f52eef54a3c1ab5eeb096700659..e89421edfb1684a10e39d6505c21a7f31137a414 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -28,14 +28,14 @@ use codec::Encode; -use sp_runtime::{ - generic::BlockId, - traits::{Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One}, +use sp_api::{ + ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; -use sp_api::{ - Core, ApiExt, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, Hash, HashFor, Header as HeaderT, NumberFor, One}, }; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -94,7 +94,9 @@ pub struct BuiltBlock, } -impl>> BuiltBlock { +impl>> + BuiltBlock +{ /// Convert into the inner values.
pub fn into_inner(self) -> (Block, StorageChanges, Option) { (self.block, self.storage_changes, self.proof) @@ -103,11 +105,11 @@ impl>> BuiltBl /// Block builder provider pub trait BlockBuilderProvider - where - Block: BlockT, - B: backend::Backend, - Self: Sized, - RA: ProvideRuntimeApi, +where + Block: BlockT, + B: backend::Backend, + Self: Sized, + RA: ProvideRuntimeApi, { /// Create a new block, built on top of `parent`. /// @@ -143,7 +145,8 @@ impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> where Block: BlockT, A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + ApiExt>, + A::Api: + BlockBuilderApi + ApiExt>, B: backend::Backend, { /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. @@ -177,9 +180,7 @@ where let block_id = BlockId::Hash(parent_hash); - api.initialize_block_with_context( - &block_id, ExecutionContext::BlockConstruction, &header, - )?; + api.initialize_block_with_context(&block_id, ExecutionContext::BlockConstruction, &header)?; Ok(Self { parent_hash, @@ -207,12 +208,10 @@ where Ok(Ok(_)) => { extrinsics.push(xt); TransactionOutcome::Commit(Ok(())) - } - Ok(Err(tx_validity)) => { - TransactionOutcome::Rollback( - Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), - ) }, + Ok(Err(tx_validity)) => TransactionOutcome::Rollback(Err( + ApplyExtrinsicFailed::Validity(tx_validity).into(), + )), Err(e) => TransactionOutcome::Rollback(Err(Error::from(e))), } }) @@ -224,9 +223,9 @@ where /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. pub fn build(mut self) -> Result>, Error> { - let header = self.api.finalize_block_with_context( - &self.block_id, ExecutionContext::BlockConstruction - )?; + let header = self + .api + .finalize_block_with_context(&self.block_id, ExecutionContext::BlockConstruction)?; debug_assert_eq!( header.extrinsics_root().clone(), @@ -244,11 +243,10 @@ where )?; let parent_hash = self.parent_hash; - let storage_changes = self.api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - parent_hash, - ).map_err(|e| sp_blockchain::Error::StorageChanges(e))?; + let storage_changes = self + .api + .into_storage_changes(&state, changes_trie_state.as_ref(), parent_hash) + .map_err(|e| sp_blockchain::Error::StorageChanges(e))?; Ok(BuiltBlock { block: ::new(header, self.extrinsics), @@ -265,15 +263,17 @@ where inherent_data: sp_inherents::InherentData, ) -> Result, Error> { let block_id = self.block_id; - self.api.execute_in_transaction(move |api| { - // `create_inherents` should not change any state, to ensure this we always rollback - // the transaction. - TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( - &block_id, - ExecutionContext::BlockConstruction, - inherent_data - )) - }).map_err(|e| Error::Application(Box::new(e))) + self.api + .execute_in_transaction(move |api| { + // `create_inherents` should not change any state, to ensure this we always rollback + // the transaction. + TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( + &block_id, + ExecutionContext::BlockConstruction, + inherent_data, + )) + }) + .map_err(|e| Error::Application(Box::new(e))) } /// Estimate the size of the block in the current state. 
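The heart of `BlockBuilder::push` above is its commit-or-rollback rule: each extrinsic is applied inside a state transaction, committed when it applies cleanly, and rolled back on a validity error so that one failed extrinsic cannot poison the block under construction. A toy rendering of that rule over a `Vec<u8>` standing in for storage (the types here are illustrative, not the sc-block-builder API; an even payload plays the role of a valid extrinsic):

    // Outcome wrapper mirroring the commit/rollback split used above.
    enum TransactionOutcome<T> {
        Commit(T),
        Rollback(T),
    }

    struct ToyBuilder {
        state: Vec<u8>,      // stand-in for runtime storage
        extrinsics: Vec<u8>, // extrinsics accepted into the block so far
    }

    impl ToyBuilder {
        fn push(&mut self, xt: u8) -> Result<(), String> {
            // Snapshot, apply, then decide whether to keep the changes.
            let snapshot = self.state.clone();
            self.state.push(xt);
            let outcome = if xt % 2 == 0 {
                TransactionOutcome::Commit(Ok(()))
            } else {
                TransactionOutcome::Rollback(Err(format!("invalid extrinsic {}", xt)))
            };
            match outcome {
                TransactionOutcome::Commit(res) => {
                    self.extrinsics.push(xt);
                    res
                },
                TransactionOutcome::Rollback(res) => {
                    self.state = snapshot; // undo everything the failed extrinsic did
                    res
                },
            }
        }
    }

    fn main() {
        let mut builder = ToyBuilder { state: vec![], extrinsics: vec![] };
        assert!(builder.push(2).is_ok());
        assert!(builder.push(3).is_err()); // rolled back, nothing kept
        assert_eq!(builder.state, vec![2]);
        assert_eq!(builder.extrinsics, vec![2]);
    }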
@@ -312,19 +312,22 @@ mod tests { RecordProof::Yes, Default::default(), &*backend, - ).unwrap().build().unwrap(); + ) + .unwrap() + .build() + .unwrap(); let proof = block.proof.expect("Proof is build on request"); let backend = sp_state_machine::create_proof_check_backend::( block.storage_changes.transaction_storage_root, proof, - ).unwrap(); + ) + .unwrap(); - assert!( - backend.storage(&sp_core::storage::well_known_keys::CODE) - .unwrap_err() - .contains("Database missing expected key"), - ); + assert!(backend + .storage(&sp_core::storage::well_known_keys::CODE) + .unwrap_err() + .contains("Database missing expected key"),); } } diff --git a/substrate/client/chain-spec/derive/src/impls.rs b/substrate/client/chain-spec/derive/src/impls.rs index 39984d4df10447e51f534332495bd7ed9596aa8a..73634dcca42e506ba8d696e289b7d2d5a0b8c918 100644 --- a/substrate/client/chain-spec/derive/src/impls.rs +++ b/substrate/client/chain-spec/derive/src/impls.rs @@ -17,9 +17,9 @@ // along with this program. If not, see . use proc_macro2::{Span, TokenStream}; -use quote::quote; -use syn::{DeriveInput, Ident, Error}; use proc_macro_crate::{crate_name, FoundCrate}; +use quote::quote; +use syn::{DeriveInput, Error, Ident}; const CRATE_NAME: &str = "sc-chain-spec"; const ATTRIBUTE_NAME: &str = "forks"; @@ -31,14 +31,18 @@ const ATTRIBUTE_NAME: &str = "forks"; pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream { derive(ast, |crate_name, name, generics: &syn::Generics, field_names, field_types, fields| { let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - let forks = fields.named.iter().find_map(|f| { - if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { - let typ = &f.ty; - Some(quote! { #typ }) - } else { - None - } - }).unwrap_or_else(|| quote! { #crate_name::NoExtension }); + let forks = fields + .named + .iter() + .find_map(|f| { + if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { + let typ = &f.ty; + Some(quote! { #typ }) + } else { + None + } + }) + .unwrap_or_else(|| quote! { #crate_name::NoExtension }); quote! { impl #impl_generics #crate_name::Extension for #name #ty_generics #where_clause { @@ -80,13 +84,12 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { Ok(FoundCrate::Itself) => Ident::new("serde", Span::call_site()), Ok(FoundCrate::Name(name)) => Ident::new(&name, Span::call_site()), Err(e) => { - let err = Error::new( - Span::call_site(), - &format!("Could not find `serde` crate: {}", e), - ).to_compile_error(); + let err = + Error::new(Span::call_site(), &format!("Could not find `serde` crate: {}", e)) + .to_compile_error(); - return quote!( #err ).into(); - } + return quote!( #err ).into() + }, }; quote! { @@ -131,14 +134,20 @@ pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { pub fn derive( ast: &DeriveInput, derive: impl Fn( - &Ident, &Ident, &syn::Generics, Vec<&Ident>, Vec<&syn::Type>, &syn::FieldsNamed, + &Ident, + &Ident, + &syn::Generics, + Vec<&Ident>, + Vec<&syn::Type>, + &syn::FieldsNamed, ) -> TokenStream, ) -> proc_macro::TokenStream { let err = || { let err = Error::new( Span::call_site(), - "ChainSpecGroup is only available for structs with named fields." 
- ).to_compile_error(); + "ChainSpecGroup is only available for structs with named fields.", + ) + .to_compile_error(); quote!( #err ).into() }; @@ -168,47 +177,35 @@ pub fn derive( derive(&crate_name, name, &ast.generics, field_names, field_types, fields).into() } -fn generate_fork_fields( - crate_name: &Ident, - names: &[&Ident], - types: &[&syn::Type], -) -> TokenStream { +fn generate_fork_fields(crate_name: &Ident, names: &[&Ident], types: &[&syn::Type]) -> TokenStream { let crate_name = std::iter::repeat(crate_name); quote! { #( pub #names: Option<<#types as #crate_name::Group>::Fork>, )* } } -fn generate_base_to_fork( - fork_name: &Ident, - names: &[&Ident], -) -> TokenStream { +fn generate_base_to_fork(fork_name: &Ident, names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { #fork_name { #( #names: Some(self.#names2.to_fork()), )* } } } -fn generate_combine_with( - names: &[&Ident], -) -> TokenStream { +fn generate_combine_with(names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { #( self.#names.combine_with(other.#names2); )* } } -fn generate_fork_to_base( - fork: &Ident, - names: &[&Ident], -) -> TokenStream { +fn generate_fork_to_base(fork: &Ident, names: &[&Ident]) -> TokenStream { let names2 = names.to_vec(); - quote!{ + quote! { Some(#fork { #( #names: self.#names2?.to_base()?, )* }) diff --git a/substrate/client/chain-spec/src/chain_spec.rs b/substrate/client/chain-spec/src/chain_spec.rs index 59b55707e182bcf797529630f81dc2b7b4c3054d..681ab8ea640a293a7a49c526d5bccd1b061d9af7 100644 --- a/substrate/client/chain-spec/src/chain_spec.rs +++ b/substrate/client/chain-spec/src/chain_spec.rs @@ -19,15 +19,20 @@ //! Substrate chain configurations. #![warn(missing_docs)] -use std::{borrow::Cow, fs::File, path::PathBuf, sync::Arc, collections::HashMap}; -use serde::{Serialize, Deserialize}; -use sp_core::{storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}, Bytes}; -use sp_runtime::BuildStorage; -use serde_json as json; -use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties}; +use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use serde::{Deserialize, Serialize}; +use serde_json as json; +use sp_core::{ + storage::{ChildInfo, Storage, StorageChild, StorageData, StorageKey}, + Bytes, +}; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + BuildStorage, +}; +use std::{borrow::Cow, collections::HashMap, fs::File, path::PathBuf, sync::Arc}; enum GenesisSource { File(PathBuf), @@ -56,8 +61,8 @@ impl GenesisSource { match self { Self::File(path) => { - let file = File::open(path) - .map_err(|e| format!("Error opening spec file: {}", e))?; + let file = + File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; let genesis: GenesisContainer = json::from_reader(file) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) @@ -69,22 +74,25 @@ impl GenesisSource { }, Self::Factory(f) => Ok(Genesis::Runtime(f())), Self::Storage(storage) => { - let top = storage.top + let top = storage + .top .iter() .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone()))) .collect(); - let children_default = storage.children_default + let children_default = storage + .children_default .iter() - .map(|(k, child)| - ( - StorageKey(k.clone()), - child.data + .map(|(k, child)| { + ( + StorageKey(k.clone()), 
+ child + .data .iter() .map(|(k, v)| (StorageKey(k.clone()), StorageData(v.clone()))) - .collect() - ) - ) + .collect(), + ) + }) .collect(); Ok(Genesis::Raw(RawGenesis { top, children_default })) @@ -99,24 +107,24 @@ impl BuildStorage for ChainSpec { Genesis::Runtime(gc) => gc.build_storage(), Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children_default: children_map.into_iter().map(|(storage_key, child_content)| { - let child_info = ChildInfo::new_default(storage_key.0.as_slice()); - ( - storage_key.0, - StorageChild { - data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - child_info, - }, - ) - }).collect(), + children_default: children_map + .into_iter() + .map(|(storage_key, child_content)| { + let child_info = ChildInfo::new_default(storage_key.0.as_slice()); + ( + storage_key.0, + StorageChild { + data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + child_info, + }, + ) + }) + .collect(), }), } } - fn assimilate_storage( - &self, - _: &mut Storage, - ) -> Result<(), String> { + fn assimilate_storage(&self, _: &mut Storage) -> Result<(), String> { Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) } } @@ -181,10 +189,7 @@ pub struct ChainSpec { impl Clone for ChainSpec { fn clone(&self) -> Self { - ChainSpec { - client_spec: self.client_spec.clone(), - genesis: self.genesis.clone(), - } + ChainSpec { client_spec: self.client_spec.clone(), genesis: self.genesis.clone() } } } @@ -258,10 +263,7 @@ impl ChainSpec { code_substitutes: HashMap::new(), }; - ChainSpec { - client_spec, - genesis: GenesisSource::Factory(Arc::new(constructor)), - } + ChainSpec { client_spec, genesis: GenesisSource::Factory(Arc::new(constructor)) } } /// Type of the chain. @@ -281,22 +283,15 @@ impl ChainSpec { let json = json.into(); let client_spec = json::from_slice(json.as_ref()) .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - client_spec, - genesis: GenesisSource::Binary(json), - }) + Ok(ChainSpec { client_spec, genesis: GenesisSource::Binary(json) }) } /// Parse json file into a `ChainSpec` pub fn from_json_file(path: PathBuf) -> Result { - let file = File::open(&path) - .map_err(|e| format!("Error opening spec file: {}", e))?; - let client_spec = json::from_reader(file) - .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - client_spec, - genesis: GenesisSource::File(path), - }) + let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?; + let client_spec = + json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; + Ok(ChainSpec { client_spec, genesis: GenesisSource::File(path) }) } } @@ -312,33 +307,34 @@ impl ChainSpec { let genesis = match (raw, self.genesis.resolve()?) 
{ (true, Genesis::Runtime(g)) => { let storage = g.build_storage()?; - let top = storage.top.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(); - let children_default = storage.children_default.into_iter() - .map(|(sk, child)| ( - StorageKey(sk), - child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - )) + let top = + storage.top.into_iter().map(|(k, v)| (StorageKey(k), StorageData(v))).collect(); + let children_default = storage + .children_default + .into_iter() + .map(|(sk, child)| { + ( + StorageKey(sk), + child + .data + .into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + ) + }) .collect(); Genesis::Raw(RawGenesis { top, children_default }) }, (_, genesis) => genesis, }; - Ok(JsonContainer { - client_spec: self.client_spec.clone(), - genesis, - }) + Ok(JsonContainer { client_spec: self.client_spec.clone(), genesis }) } /// Dump to json string. pub fn as_json(&self, raw: bool) -> Result { let container = self.json_container(raw)?; - json::to_string_pretty(&container) - .map_err(|e| format!("Error generating spec json: {}", e)) + json::to_string_pretty(&container).map_err(|e| format!("Error generating spec json: {}", e)) } } @@ -404,7 +400,11 @@ where } fn code_substitutes(&self) -> std::collections::HashMap> { - self.client_spec.code_substitutes.iter().map(|(h, c)| (h.clone(), c.0.clone())).collect() + self.client_spec + .code_substitutes + .iter() + .map(|(h, c)| (h.clone(), c.0.clone())) + .collect() } } @@ -417,7 +417,8 @@ pub struct LightSyncState { /// The babe weight of the finalized block. pub babe_finalized_block_weight: sp_consensus_babe::BabeBlockWeight, /// The authority set for grandpa. - pub grandpa_authority_set: sc_finality_grandpa::AuthoritySet<::Hash, NumberFor>, + pub grandpa_authority_set: + sc_finality_grandpa::AuthoritySet<::Hash, NumberFor>, } impl LightSyncState { @@ -427,25 +428,25 @@ impl LightSyncState { SerializableLightSyncState { finalized_block_header: StorageData(self.finalized_block_header.encode()), - babe_epoch_changes: - StorageData(self.babe_epoch_changes.encode()), - babe_finalized_block_weight: - self.babe_finalized_block_weight, - grandpa_authority_set: - StorageData(self.grandpa_authority_set.encode()), + babe_epoch_changes: StorageData(self.babe_epoch_changes.encode()), + babe_finalized_block_weight: self.babe_finalized_block_weight, + grandpa_authority_set: StorageData(self.grandpa_authority_set.encode()), } } /// Convert from a `SerializableLightSyncState`. 
- pub fn from_serializable(serialized: &SerializableLightSyncState) -> Result { + pub fn from_serializable( + serialized: &SerializableLightSyncState, + ) -> Result { Ok(Self { - finalized_block_header: codec::Decode::decode(&mut &serialized.finalized_block_header.0[..])?, - babe_epoch_changes: - codec::Decode::decode(&mut &serialized.babe_epoch_changes.0[..])?, - babe_finalized_block_weight: - serialized.babe_finalized_block_weight, - grandpa_authority_set: - codec::Decode::decode(&mut &serialized.grandpa_authority_set.0[..])?, + finalized_block_header: codec::Decode::decode( + &mut &serialized.finalized_block_header.0[..], + )?, + babe_epoch_changes: codec::Decode::decode(&mut &serialized.babe_epoch_changes.0[..])?, + babe_finalized_block_weight: serialized.babe_finalized_block_weight, + grandpa_authority_set: codec::Decode::decode( + &mut &serialized.grandpa_authority_set.0[..], + )?, }) } } @@ -469,12 +470,9 @@ mod tests { struct Genesis(HashMap); impl BuildStorage for Genesis { - fn assimilate_storage( - &self, - storage: &mut Storage, - ) -> Result<(), String> { + fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())) + self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), ); Ok(()) } @@ -485,11 +483,10 @@ mod tests { #[test] fn should_deserialize_example_chain_spec() { let spec1 = TestSpec::from_json_bytes(Cow::Owned( - include_bytes!("../res/chain_spec.json").to_vec() - )).unwrap(); - let spec2 = TestSpec::from_json_file( - PathBuf::from("./res/chain_spec.json") - ).unwrap(); + include_bytes!("../res/chain_spec.json").to_vec(), + )) + .unwrap(); + let spec2 = TestSpec::from_json_file(PathBuf::from("./res/chain_spec.json")).unwrap(); assert_eq!(spec1.as_json(false), spec2.as_json(false)); assert_eq!(spec2.chain_type(), ChainType::Live) @@ -506,8 +503,9 @@ mod tests { #[test] fn should_deserialize_chain_spec_with_extensions() { let spec = TestSpec2::from_json_bytes(Cow::Owned( - include_bytes!("../res/chain_spec2.json").to_vec() - )).unwrap(); + include_bytes!("../res/chain_spec2.json").to_vec(), + )) + .unwrap(); assert_eq!(spec.extensions().my_property, "Test Extension"); } diff --git a/substrate/client/chain-spec/src/extension.rs b/substrate/client/chain-spec/src/extension.rs index 2a6126e4ce2cae2b3e2ee90c0e4747f70073261f..665f51303b6a601ab5f5ceb61f10cbb360d267f2 100644 --- a/substrate/client/chain-spec/src/extension.rs +++ b/substrate/client/chain-spec/src/extension.rs @@ -18,19 +18,21 @@ //! Chain Spec extensions helpers. -use std::fmt::Debug; -use std::any::{TypeId, Any}; +use std::{ + any::{Any, TypeId}, + fmt::Debug, +}; use std::collections::BTreeMap; -use serde::{Serialize, Deserialize, de::DeserializeOwned}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// A `ChainSpec` extension. /// /// This trait is implemented automatically by `ChainSpecGroup` macro. pub trait Group: Clone + Sized { /// An associated type containing fork definition. - type Fork: Fork; + type Fork: Fork; /// Convert to fork type. fn to_fork(self) -> Self::Fork; @@ -45,7 +47,7 @@ pub trait Group: Clone + Sized { /// a complete set of parameters pub trait Fork: Serialize + DeserializeOwned + Clone + Sized { /// A base `Group` type. - type Base: Group; + type Base: Group; /// Combine with another struct. 
/// @@ -128,7 +130,8 @@ pub trait Extension: Serialize + DeserializeOwned + Clone { fn get_any(&self, t: TypeId) -> &dyn Any; /// Get forkable extensions of specific type. - fn forks(&self) -> Option> where + fn forks(&self) -> Option> + where BlockNumber: Ord + Clone + 'static, T: Group + 'static, ::Extension: Extension, @@ -142,8 +145,12 @@ pub trait Extension: Serialize + DeserializeOwned + Clone { impl Extension for crate::NoExtension { type Forks = Self; - fn get(&self) -> Option<&T> { None } - fn get_any(&self, _t: TypeId) -> &dyn Any { self } + fn get(&self) -> Option<&T> { + None + } + fn get_any(&self, _t: TypeId) -> &dyn Any { + self + } } pub trait IsForks { @@ -166,14 +173,12 @@ pub struct Forks { impl Default for Forks { fn default() -> Self { - Self { - base: Default::default(), - forks: Default::default(), - } + Self { base: Default::default(), forks: Default::default() } } } -impl Forks where +impl Forks +where T::Fork: Debug, { /// Create new fork definition given the base and the forks. @@ -195,7 +200,8 @@ impl Forks where } } -impl IsForks for Forks where +impl IsForks for Forks +where B: Ord + 'static, T: Group + 'static, { @@ -203,29 +209,31 @@ impl IsForks for Forks where type Extension = T; } -impl Forks where +impl Forks +where T::Fork: Extension, { /// Get forks definition for a subset of this extension. /// /// Returns the `Forks` struct, but limited to a particular type /// within the extension. - pub fn for_type(&self) -> Option> where + pub fn for_type(&self) -> Option> + where X: Group + 'static, { let base = self.base.get::()?.clone(); - let forks = self.forks.iter().filter_map(|(k, v)| { - Some((k.clone(), v.get::>()?.clone()?)) - }).collect(); - - Some(Forks { - base, - forks, - }) + let forks = self + .forks + .iter() + .filter_map(|(k, v)| Some((k.clone(), v.get::>()?.clone()?))) + .collect(); + + Some(Forks { base, forks }) } } -impl Extension for Forks where +impl Extension for Forks +where B: Serialize + DeserializeOwned + Ord + Clone + 'static, E: Extension + Group + 'static, { @@ -245,7 +253,8 @@ impl Extension for Forks where } } - fn forks(&self) -> Option> where + fn forks(&self) -> Option> + where BlockNumber: Ord + Clone + 'static, T: Group + 'static, ::Extension: Extension, @@ -266,7 +275,7 @@ pub trait GetExtension { fn get_any(&self, t: TypeId) -> &dyn Any; } -impl GetExtension for E { +impl GetExtension for E { fn get_any(&self, t: TypeId) -> &dyn Any { Extension::get_any(self, t) } @@ -281,7 +290,7 @@ pub fn get_extension(e: &dyn GetExtension) -> Option<&T> { #[cfg(test)] mod tests { use super::*; - use sc_chain_spec_derive::{ChainSpecGroup, ChainSpecExtension}; + use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; // Make the proc macro work for tests and doc tests. 
use crate as sc_chain_spec; @@ -297,7 +306,9 @@ mod tests { pub test: u8, } - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] + #[derive( + Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension, + )] #[serde(deny_unknown_fields)] pub struct Extensions { pub ext1: Extension1, @@ -315,11 +326,12 @@ mod tests { #[test] fn forks_should_work_correctly() { - use super::Extension as _ ; + use super::Extension as _; // We first need to deserialize into a `Value` because of the following bug: // https://github.com/serde-rs/json/issues/505 - let ext_val: serde_json::Value = serde_json::from_str(r#" + let ext_val: serde_json::Value = serde_json::from_str( + r#" { "test": 11, "forkable": { @@ -342,40 +354,40 @@ mod tests { } } } - "#).unwrap(); + "#, + ) + .unwrap(); let ext: Ext2 = serde_json::from_value(ext_val).unwrap(); - assert_eq!(ext.get::(), Some(&Extension1 { - test: 11 - })); + assert_eq!(ext.get::(), Some(&Extension1 { test: 11 })); // get forks definition let forks = ext.get::>().unwrap(); - assert_eq!(forks.at_block(0), Extensions { - ext1: Extension1 { test: 15 }, - ext2: Extension2 { test: 123 }, - }); - assert_eq!(forks.at_block(1), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 123 }, - }); - assert_eq!(forks.at_block(2), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 5 }, - }); - assert_eq!(forks.at_block(4), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 5 }, - }); - assert_eq!(forks.at_block(5), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 1 }, - }); - assert_eq!(forks.at_block(10), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 1 }, - }); + assert_eq!( + forks.at_block(0), + Extensions { ext1: Extension1 { test: 15 }, ext2: Extension2 { test: 123 } } + ); + assert_eq!( + forks.at_block(1), + Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 123 } } + ); + assert_eq!( + forks.at_block(2), + Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 5 } } + ); + assert_eq!( + forks.at_block(4), + Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 5 } } + ); + assert_eq!( + forks.at_block(5), + Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 1 } } + ); + assert_eq!( + forks.at_block(10), + Extensions { ext1: Extension1 { test: 5 }, ext2: Extension2 { test: 1 } } + ); assert!(forks.at_block(10).get::().is_some()); // filter forks for `Extension2` diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs index 1bfa1808ee556a3055caa0deec989854a15f8acf..ac580802a5d5c9d4d24ba5cd23adaeb7dd9ae18a 100644 --- a/substrate/client/chain-spec/src/lib.rs +++ b/substrate/client/chain-spec/src/lib.rs @@ -35,7 +35,7 @@ //! //! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecExtension)] //! pub struct MyExtension { -//! pub known_blocks: HashMap, +//! pub known_blocks: HashMap, //! } //! //! pub type MyChainSpec = GenericChainSpec; @@ -53,19 +53,19 @@ //! //! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] //! pub struct ClientParams { -//! max_block_size: usize, -//! max_extrinsic_size: usize, +//! max_block_size: usize, +//! max_extrinsic_size: usize, //! } //! //! #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup)] //! pub struct PoolParams { -//! max_transaction_size: usize, +//! max_transaction_size: usize, //! } //! //! 
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, ChainSpecGroup, ChainSpecExtension)] //! pub struct Extension { -//! pub client: ClientParams, -//! pub pool: PoolParams, +//! pub client: ClientParams, +//! pub pool: PoolParams, //! } //! //! pub type BlockNumber = u64; @@ -88,20 +88,20 @@ //! //! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] //! pub struct ClientParams { -//! max_block_size: usize, -//! max_extrinsic_size: usize, +//! max_block_size: usize, +//! max_extrinsic_size: usize, //! } //! //! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecGroup)] //! pub struct PoolParams { -//! max_transaction_size: usize, +//! max_transaction_size: usize, //! } //! //! #[derive(Clone, Debug, Serialize, Deserialize, ChainSpecExtension)] //! pub struct Extension { -//! pub client: ClientParams, -//! #[forks] -//! pub pool: Forks, +//! pub client: ClientParams, +//! #[forks] +//! pub pool: Forks, //! } //! //! pub type MyChainSpec = GenericChainSpec; @@ -111,16 +111,16 @@ mod chain_spec; mod extension; pub use chain_spec::{ - ChainSpec as GenericChainSpec, NoExtension, LightSyncState, SerializableLightSyncState, + ChainSpec as GenericChainSpec, LightSyncState, NoExtension, SerializableLightSyncState, }; -pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; +pub use extension::{get_extension, Extension, Fork, Forks, GetExtension, Group}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -use serde::{Serialize, de::DeserializeOwned}; -use sp_runtime::BuildStorage; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; +use serde::{de::DeserializeOwned, Serialize}; use sp_core::storage::Storage; +use sp_runtime::BuildStorage; /// The type of a chain. 
/// diff --git a/substrate/client/cli/src/arg_enums.rs b/substrate/client/cli/src/arg_enums.rs index d9a4210376298a94d18d1fd19f5359be0701a05b..83b1c57e071a492b978a225fc6b10279debd6c71 100644 --- a/substrate/client/cli/src/arg_enums.rs +++ b/substrate/client/cli/src/arg_enums.rs @@ -74,9 +74,8 @@ impl WasmExecutionMethod { impl Into for WasmExecutionMethod { fn into(self) -> sc_service::config::WasmExecutionMethod { match self { - WasmExecutionMethod::Interpreted => { - sc_service::config::WasmExecutionMethod::Interpreted - } + WasmExecutionMethod::Interpreted => + sc_service::config::WasmExecutionMethod::Interpreted, #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, #[cfg(not(feature = "wasmtime"))] @@ -250,14 +249,10 @@ impl Into for SyncMode { fn into(self) -> sc_network::config::SyncMode { match self { SyncMode::Full => sc_network::config::SyncMode::Full, - SyncMode::Fast => sc_network::config::SyncMode::Fast { - skip_proofs: false, - storage_chain_mode: false, - }, - SyncMode::FastUnsafe => sc_network::config::SyncMode::Fast { - skip_proofs: true, - storage_chain_mode: false, - }, + SyncMode::Fast => + sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, + SyncMode::FastUnsafe => + sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false }, } } } diff --git a/substrate/client/cli/src/commands/build_spec_cmd.rs b/substrate/client/cli/src/commands/build_spec_cmd.rs index 78ad3b64724d9fb842b910e2b53f64b0edf17211..75fdf07643ee257bb6938bc1046b21cc02ab13e8 100644 --- a/substrate/client/cli/src/commands/build_spec_cmd.rs +++ b/substrate/client/cli/src/commands/build_spec_cmd.rs @@ -16,15 +16,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::NodeKeyParams; -use crate::params::SharedParams; -use crate::CliConfiguration; +use crate::{ + error, + params::{NodeKeyParams, SharedParams}, + CliConfiguration, +}; use log::info; use sc_network::config::build_multiaddr; -use sc_service::{config::{MultiaddrWithPeerId, NetworkConfiguration}, ChainSpec}; -use structopt::StructOpt; +use sc_service::{ + config::{MultiaddrWithPeerId, NetworkConfiguration}, + ChainSpec, +}; use std::io::Write; +use structopt::StructOpt; /// The `build-spec` command used to build a specification. #[derive(Debug, StructOpt, Clone)] diff --git a/substrate/client/cli/src/commands/check_block_cmd.rs b/substrate/client/cli/src/commands/check_block_cmd.rs index a47245de0f78c9acb8d37f6d939b73da3f642584..07a76319dca3ffa0802f6b6df47115f47e60cc48 100644 --- a/substrate/client/cli/src/commands/check_block_cmd.rs +++ b/substrate/client/cli/src/commands/check_block_cmd.rs @@ -17,7 +17,9 @@ // along with this program. If not, see . 
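The arg_enums.rs changes a few hunks back all follow one conversion pattern: a flat, CLI-facing enum is mapped onto its richer service-side counterpart through `Into`, with the two fast-sync variants differing only in `skip_proofs`. A stripped-down sketch of that mapping (both enums are local stand-ins for the actual sc-cli and sc-network types):

    #[derive(Debug, Clone, Copy)]
    enum SyncMode {
        Full,
        Fast,
        FastUnsafe,
    }

    #[derive(Debug, PartialEq)]
    enum ServiceSyncMode {
        Full,
        Fast { skip_proofs: bool, storage_chain_mode: bool },
    }

    impl Into<ServiceSyncMode> for SyncMode {
        fn into(self) -> ServiceSyncMode {
            match self {
                SyncMode::Full => ServiceSyncMode::Full,
                SyncMode::Fast =>
                    ServiceSyncMode::Fast { skip_proofs: false, storage_chain_mode: false },
                SyncMode::FastUnsafe =>
                    ServiceSyncMode::Fast { skip_proofs: true, storage_chain_mode: false },
            }
        }
    }

    fn main() {
        let mode: ServiceSyncMode = SyncMode::FastUnsafe.into();
        assert_eq!(mode, ServiceSyncMode::Fast { skip_proofs: true, storage_chain_mode: false });
    }

Idiomatic Rust would normally implement `From` and get `Into` for free; the sketch keeps the direct `impl Into` shape only because that is the shape the crate itself uses.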
use crate::{ - CliConfiguration, error, params::{ImportParams, SharedParams, BlockNumberOrHash}, + error, + params::{BlockNumberOrHash, ImportParams, SharedParams}, + CliConfiguration, }; use sc_client_api::{BlockBackend, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -48,11 +50,7 @@ pub struct CheckBlockCmd { impl CheckBlockCmd { /// Run the check-block command - pub async fn run( - &self, - client: Arc, - import_queue: IQ, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where B: BlockT + for<'de> serde::Deserialize<'de>, C: BlockBackend + UsageProvider + Send + Sync + 'static, diff --git a/substrate/client/cli/src/commands/export_blocks_cmd.rs b/substrate/client/cli/src/commands/export_blocks_cmd.rs index 4153c80a0545e78574bb1cb7a35ec3a3aa3b1a45..0ed8e3ff3591a4a26c6bc0192362d31e4ae900aa 100644 --- a/substrate/client/cli/src/commands/export_blocks_cmd.rs +++ b/substrate/client/cli/src/commands/export_blocks_cmd.rs @@ -16,21 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::{GenericNumber, DatabaseParams, PruningParams, SharedParams}; -use crate::CliConfiguration; -use log::info; -use sc_service::{ - config::DatabaseConfig, chain_ops::export_blocks, +use crate::{ + error, + params::{DatabaseParams, GenericNumber, PruningParams, SharedParams}, + CliConfiguration, }; +use log::info; use sc_client_api::{BlockBackend, UsageProvider}; +use sc_service::{chain_ops::export_blocks, config::DatabaseConfig}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::fmt::Debug; -use std::fs; -use std::io; -use std::path::PathBuf; -use std::str::FromStr; -use std::sync::Arc; +use std::{fmt::Debug, fs, io, path::PathBuf, str::FromStr, sync::Arc}; use structopt::StructOpt; /// The `export-blocks` command used to export blocks. @@ -95,9 +90,7 @@ impl ExportBlocksCmd { None => Box::new(io::stdout()), }; - export_blocks(client, file, from.into(), to, binary) - .await - .map_err(Into::into) + export_blocks(client, file, from.into(), to, binary).await.map_err(Into::into) } } diff --git a/substrate/client/cli/src/commands/export_state_cmd.rs b/substrate/client/cli/src/commands/export_state_cmd.rs index e154c3a502217df8186c2fffa6e9b95ba2c2684f..36eabd2c24f5c347f8e030f8339187e74bc3caf4 100644 --- a/substrate/client/cli/src/commands/export_state_cmd.rs +++ b/substrate/client/cli/src/commands/export_state_cmd.rs @@ -17,13 +17,15 @@ // along with this program. If not, see . use crate::{ - CliConfiguration, error, params::{PruningParams, SharedParams, BlockNumberOrHash}, + error, + params::{BlockNumberOrHash, PruningParams, SharedParams}, + CliConfiguration, }; use log::info; +use sc_client_api::{StorageProvider, UsageProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::{fmt::Debug, str::FromStr, io::Write, sync::Arc}; +use std::{fmt::Debug, io::Write, str::FromStr, sync::Arc}; use structopt::StructOpt; -use sc_client_api::{StorageProvider, UsageProvider}; /// The `export-state` command used to export the state of a given block into /// a chain spec. 
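(Reviewer aside: the import hunks in this file and its neighbours all apply the same two conventions. `use` items are merged into a single tree per crate root and sorted, and `where` clauses move onto their own lines instead of trailing the signature. Below is a minimal illustrative sketch of the resulting shape; the function and all names in it are hypothetical and appear nowhere in this PR.)

	// Imports merged into one `std` tree and reordered.
	use std::{fmt::Debug, str::FromStr};

	// The `where` clause sits on its own line at the outer indent level.
	pub fn parse_all<T>(raw: &[&str]) -> Result<Vec<T>, T::Err>
	where
		T: FromStr + Debug,
	{
		raw.iter().map(|s| s.parse()).collect()
	}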
diff --git a/substrate/client/cli/src/commands/generate.rs b/substrate/client/cli/src/commands/generate.rs index 42214d2f5e45854884f565acc60d835bdd613b86..7032ebd72e0c7c5d4ced4016acc04faba1ee4837 100644 --- a/substrate/client/cli/src/commands/generate.rs +++ b/substrate/client/cli/src/commands/generate.rs @@ -16,12 +16,12 @@ // limitations under the License. //! Implementation of the `generate` subcommand -use bip39::{MnemonicType, Mnemonic, Language}; -use structopt::StructOpt; use crate::{ - utils::print_from_uri, KeystoreParams, Error, - with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, + utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, + NetworkSchemeFlag, OutputTypeFlag, }; +use bip39::{Language, Mnemonic, MnemonicType}; +use structopt::StructOpt; /// The `generate` command #[derive(Debug, StructOpt, Clone)] @@ -52,12 +52,11 @@ impl GenerateCmd { /// Run the command pub fn run(&self) -> Result<(), Error> { let words = match self.words { - Some(words) => { - MnemonicType::for_word_count(words) - .map_err(|_| { - Error::Input("Invalid number of words given for phrase: must be 12/15/18/21/24".into()) - })? - }, + Some(words) => MnemonicType::for_word_count(words).map_err(|_| { + Error::Input( + "Invalid number of words given for phrase: must be 12/15/18/21/24".into(), + ) + })?, None => MnemonicType::Words12, }; let mnemonic = Mnemonic::new(words, Language::English); diff --git a/substrate/client/cli/src/commands/generate_node_key.rs b/substrate/client/cli/src/commands/generate_node_key.rs index ec22c6298adb6f3b2d000b2668734f7be45ca0e5..74a4197f366212d91f0cf5ceda3c570daa2a6403 100644 --- a/substrate/client/cli/src/commands/generate_node_key.rs +++ b/substrate/client/cli/src/commands/generate_node_key.rs @@ -18,9 +18,9 @@ //! Implementation of the `generate-node-key` subcommand use crate::Error; -use structopt::StructOpt; -use std::{path::PathBuf, fs}; use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; +use std::{fs, path::PathBuf}; +use structopt::StructOpt; /// The `generate-node-key` command #[derive(Debug, StructOpt)] @@ -59,15 +59,14 @@ impl GenerateNodeKeyCmd { #[cfg(test)] mod tests { use super::*; - use tempfile::Builder; use std::io::Read; + use tempfile::Builder; #[test] fn generate_node_key() { let mut file = Builder::new().prefix("keyfile").tempfile().unwrap(); let file_path = file.path().display().to_string(); - let generate = - GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); + let generate = GenerateNodeKeyCmd::from_iter(&["generate-node-key", "--file", &file_path]); assert!(generate.run().is_ok()); let mut buf = String::new(); assert!(file.read_to_string(&mut buf).is_ok()); diff --git a/substrate/client/cli/src/commands/import_blocks_cmd.rs b/substrate/client/cli/src/commands/import_blocks_cmd.rs index 89f70d06813ce2da2bec94460e87c8fa96a8ca7b..9b211b88d55631fccced99c9756341810533bfd2 100644 --- a/substrate/client/cli/src/commands/import_blocks_cmd.rs +++ b/substrate/client/cli/src/commands/import_blocks_cmd.rs @@ -16,19 +16,22 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::error; -use crate::params::ImportParams; -use crate::params::SharedParams; -use crate::CliConfiguration; +use crate::{ + error, + params::{ImportParams, SharedParams}, + CliConfiguration, +}; +use sc_client_api::UsageProvider; use sc_service::chain_ops::import_blocks; use sp_runtime::traits::Block as BlockT; -use std::fmt::Debug; -use std::fs; -use std::io::{self, Read, Seek}; -use std::path::PathBuf; -use std::sync::Arc; +use std::{ + fmt::Debug, + fs, + io::{self, Read, Seek}, + path::PathBuf, + sync::Arc, +}; use structopt::StructOpt; -use sc_client_api::UsageProvider; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt)] @@ -63,11 +66,7 @@ impl ReadPlusSeek for T {} impl ImportBlocksCmd { /// Run the import-blocks command - pub async fn run( - &self, - client: Arc, - import_queue: IQ, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where C: UsageProvider + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, @@ -79,7 +78,7 @@ impl ImportBlocksCmd { let mut buffer = Vec::new(); io::stdin().read_to_end(&mut buffer)?; Box::new(io::Cursor::new(buffer)) - } + }, }; import_blocks(client, import_queue, file, false, self.binary) diff --git a/substrate/client/cli/src/commands/insert_key.rs b/substrate/client/cli/src/commands/insert_key.rs index f166db85c15649225ea1b250d557e9359f8acd7a..05055dc53c1e2f3e11f81f4ea5aef302ca132370 100644 --- a/substrate/client/cli/src/commands/insert_key.rs +++ b/substrate/client/cli/src/commands/insert_key.rs @@ -18,22 +18,18 @@ //! Implementation of the `insert` subcommand use crate::{ - Error, KeystoreParams, CryptoSchemeFlag, SharedParams, utils, with_crypto_scheme, - SubstrateCli, + utils, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, SharedParams, SubstrateCli, }; -use std::{sync::Arc, convert::TryFrom}; -use structopt::StructOpt; -use sp_core::{crypto::KeyTypeId, crypto::SecretString}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sc_keystore::LocalKeystore; -use sc_service::config::{KeystoreConfig, BasePath}; +use sc_service::config::{BasePath, KeystoreConfig}; +use sp_core::crypto::{KeyTypeId, SecretString}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use std::{convert::TryFrom, sync::Arc}; +use structopt::StructOpt; /// The `insert` command #[derive(Debug, StructOpt, Clone)] -#[structopt( - name = "insert", - about = "Insert a key to the keystore of a node." -)] +#[structopt(name = "insert", about = "Insert a key to the keystore of a node.")] pub struct InsertKeyCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. 
@@ -62,7 +58,8 @@ impl InsertKeyCmd { /// Run the command pub fn run(&self, cli: &C) -> Result<(), Error> { let suri = utils::read_uri(self.suri.as_ref())?; - let base_path = self.shared_params + let base_path = self + .shared_params .base_path() .unwrap_or_else(|| BasePath::from_project("", "", &C::executable_name())); let chain_id = self.shared_params.chain_id(self.shared_params.is_dev()); @@ -78,10 +75,11 @@ impl InsertKeyCmd { let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); (keystore, public) }, - _ => unreachable!("keystore_config always returns path and password; qed") + _ => unreachable!("keystore_config always returns path and password; qed"), }; - let key_type = KeyTypeId::try_from(self.key_type.as_str()).map_err(|_| Error::KeyTypeInvalid)?; + let key_type = + KeyTypeId::try_from(self.key_type.as_str()).map_err(|_| Error::KeyTypeInvalid)?; SyncCryptoStore::insert_unknown(&*keystore, key_type, &suri, &public[..]) .map_err(|_| Error::KeyStoreOperation)?; @@ -98,10 +96,10 @@ fn to_vec(uri: &str, pass: Option) -> Result std::result::Result, String> { - Ok( - Box::new( - GenericChainSpec::from_genesis( - "test", - "test_id", - ChainType::Development, - || unimplemented!("Not required in tests"), - Vec::new(), - None, - None, - None, - NoExtension::None, - ), - ), - ) + Ok(Box::new(GenericChainSpec::from_genesis( + "test", + "test_id", + ChainType::Development, + || unimplemented!("Not required in tests"), + Vec::new(), + None, + None, + None, + NoExtension::None, + ))) } } @@ -159,15 +153,20 @@ mod tests { let path_str = format!("{}", path.path().display()); let (key, uri, _) = Pair::generate_with_phrase(None); - let inspect = InsertKeyCmd::from_iter( - &["insert-key", "-d", &path_str, "--key-type", "test", "--suri", &uri], - ); + let inspect = InsertKeyCmd::from_iter(&[ + "insert-key", + "-d", + &path_str, + "--key-type", + "test", + "--suri", + &uri, + ]); assert!(inspect.run(&Cli).is_ok()); - let keystore = LocalKeystore::open( - path.path().join("chains").join("test_id").join("keystore"), - None, - ).unwrap(); + let keystore = + LocalKeystore::open(path.path().join("chains").join("test_id").join("keystore"), None) + .unwrap(); assert!(keystore.has_keys(&[(key.public().to_raw_vec(), KeyTypeId(*b"test"))])); } } diff --git a/substrate/client/cli/src/commands/inspect_key.rs b/substrate/client/cli/src/commands/inspect_key.rs index a60b6cd93a7605469493576e3bc05cad1e1c937f..277c9015f4daf0ddd0479b0216bdd6da0fd07207 100644 --- a/substrate/client/cli/src/commands/inspect_key.rs +++ b/substrate/client/cli/src/commands/inspect_key.rs @@ -18,8 +18,8 @@ //! 
Implementation of the `inspect` subcommand use crate::{ - utils::{self, print_from_uri, print_from_public}, KeystoreParams, - with_crypto_scheme, NetworkSchemeFlag, OutputTypeFlag, CryptoSchemeFlag, Error, + utils::{self, print_from_public, print_from_uri}, + with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, NetworkSchemeFlag, OutputTypeFlag, }; use structopt::StructOpt; /// The `inspect` command @@ -103,8 +103,7 @@ mod tests { "remember fiber forum demise paper uniform squirrel feel access exclude casual effort"; let seed = "0xad1fb77243b536b90cfe5f0d351ab1b1ac40e3890b41dc64f766ee56340cfca5"; - let inspect = - InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); + let inspect = InspectKeyCmd::from_iter(&["inspect-key", words, "--password", "12345"]); assert!(inspect.run().is_ok()); let inspect = InspectKeyCmd::from_iter(&["inspect-key", seed]); diff --git a/substrate/client/cli/src/commands/inspect_node_key.rs b/substrate/client/cli/src/commands/inspect_node_key.rs index 4db32aefb5fbb51f1cb8e3719f8900e54dc8b275..92a71f8975052a27d2205850b43c7528c117f3b7 100644 --- a/substrate/client/cli/src/commands/inspect_node_key.rs +++ b/substrate/client/cli/src/commands/inspect_node_key.rs @@ -18,9 +18,8 @@ //! Implementation of the `inspect-node-key` subcommand use crate::{Error, NetworkSchemeFlag}; -use std::fs; -use libp2p::identity::{PublicKey, ed25519}; -use std::path::PathBuf; +use libp2p::identity::{ed25519, PublicKey}; +use std::{fs, path::PathBuf}; use structopt::StructOpt; /// The `inspect-node-key` command @@ -42,10 +41,10 @@ pub struct InspectNodeKeyCmd { impl InspectNodeKeyCmd { /// runs the command pub fn run(&self) -> Result<(), Error> { - let mut file_content = hex::decode(fs::read(&self.file)?) - .map_err(|_| "failed to decode secret as hex")?; - let secret = ed25519::SecretKey::from_bytes(&mut file_content) - .map_err(|_| "Bad node key file")?; + let mut file_content = + hex::decode(fs::read(&self.file)?).map_err(|_| "failed to decode secret as hex")?; + let secret = + ed25519::SecretKey::from_bytes(&mut file_content).map_err(|_| "Bad node key file")?; let keypair = ed25519::Keypair::from(secret); let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); @@ -58,8 +57,7 @@ impl InspectNodeKeyCmd { #[cfg(test)] mod tests { - use super::*; - use super::super::GenerateNodeKeyCmd; + use super::{super::GenerateNodeKeyCmd, *}; #[test] fn inspect_node_key() { diff --git a/substrate/client/cli/src/commands/key.rs b/substrate/client/cli/src/commands/key.rs index 34602657da94544c9159ce637bf9f24e260e24c2..8e1103a8ca512a63ec230b4baeae90235255f671 100644 --- a/substrate/client/cli/src/commands/key.rs +++ b/substrate/client/cli/src/commands/key.rs @@ -21,11 +21,8 @@ use crate::{Error, SubstrateCli}; use structopt::StructOpt; use super::{ - insert_key::InsertKeyCmd, - inspect_key::InspectKeyCmd, - generate::GenerateCmd, - inspect_node_key::InspectNodeKeyCmd, - generate_node_key::GenerateNodeKeyCmd, + generate::GenerateCmd, generate_node_key::GenerateNodeKeyCmd, insert_key::InsertKeyCmd, + inspect_key::InspectKeyCmd, inspect_node_key::InspectNodeKeyCmd, }; /// Key utilities for the cli. 
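(Reviewer aside: the control-flow hunks in these command files follow one consistent pattern: a match arm whose body is a single expression loses its braces and gains a trailing comma, multi-statement arms keep their braces but also end in `},`, and a `return` or `break` in tail position loses its semicolon. A self-contained sketch of the post-PR shape, with made-up names:)

	// Single-expression arms are unbraced and comma-terminated; block arms
	// keep braces plus a trailing comma; a tail `return` carries no `;`.
	fn describe(n: u32) -> &'static str {
		match n {
			0 => "zero",
			n if n % 2 == 0 =>
				"an arm too wide for one line wraps after the fat arrow",
			_ => {
				let parity = "odd";
				return parity
			},
		}
	}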
diff --git a/substrate/client/cli/src/commands/mod.rs b/substrate/client/cli/src/commands/mod.rs index 8c0d6acd6a51159bc6ec83fe275895ff44a34a51..9e7c5689b49c8d19286b30e5ce95a74025b33b26 100644 --- a/substrate/client/cli/src/commands/mod.rs +++ b/substrate/client/cli/src/commands/mod.rs @@ -19,37 +19,26 @@ mod build_spec_cmd; mod check_block_cmd; mod export_blocks_cmd; mod export_state_cmd; -mod import_blocks_cmd; -mod purge_chain_cmd; -mod sign; -mod verify; -mod vanity; -mod revert_cmd; -mod run_cmd; -mod generate_node_key; mod generate; +mod generate_node_key; +mod import_blocks_cmd; mod insert_key; -mod inspect_node_key; mod inspect_key; +mod inspect_node_key; mod key; +mod purge_chain_cmd; +mod revert_cmd; +mod run_cmd; +mod sign; pub mod utils; +mod vanity; +mod verify; pub use self::{ - build_spec_cmd::BuildSpecCmd, - check_block_cmd::CheckBlockCmd, - export_blocks_cmd::ExportBlocksCmd, - export_state_cmd::ExportStateCmd, - import_blocks_cmd::ImportBlocksCmd, - purge_chain_cmd::PurgeChainCmd, - sign::SignCmd, - generate::GenerateCmd, - insert_key::InsertKeyCmd, - inspect_key::InspectKeyCmd, - generate_node_key::GenerateNodeKeyCmd, - inspect_node_key::InspectNodeKeyCmd, - key::KeySubcommand, - vanity::VanityCmd, - verify::VerifyCmd, - revert_cmd::RevertCmd, - run_cmd::RunCmd, + build_spec_cmd::BuildSpecCmd, check_block_cmd::CheckBlockCmd, + export_blocks_cmd::ExportBlocksCmd, export_state_cmd::ExportStateCmd, generate::GenerateCmd, + generate_node_key::GenerateNodeKeyCmd, import_blocks_cmd::ImportBlocksCmd, + insert_key::InsertKeyCmd, inspect_key::InspectKeyCmd, inspect_node_key::InspectNodeKeyCmd, + key::KeySubcommand, purge_chain_cmd::PurgeChainCmd, revert_cmd::RevertCmd, run_cmd::RunCmd, + sign::SignCmd, vanity::VanityCmd, verify::VerifyCmd, }; diff --git a/substrate/client/cli/src/commands/purge_chain_cmd.rs b/substrate/client/cli/src/commands/purge_chain_cmd.rs index c61e21a6a5ad0a3872be0a1b3263f3586cdfdeb5..590046aa779bcf72ae0021851dfd3c567a8c8677 100644 --- a/substrate/client/cli/src/commands/purge_chain_cmd.rs +++ b/substrate/client/cli/src/commands/purge_chain_cmd.rs @@ -16,13 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::{DatabaseParams, SharedParams}; -use crate::CliConfiguration; +use crate::{ + error, + params::{DatabaseParams, SharedParams}, + CliConfiguration, +}; use sc_service::DatabaseConfig; -use std::fmt::Debug; -use std::fs; -use std::io::{self, Write}; +use std::{ + fmt::Debug, + fs, + io::{self, Write}, +}; use structopt::StructOpt; /// The `purge-chain` command used to remove the whole chain. @@ -44,10 +48,9 @@ pub struct PurgeChainCmd { impl PurgeChainCmd { /// Run the purge command pub fn run(&self, database_config: DatabaseConfig) -> error::Result<()> { - let db_path = database_config.path() - .ok_or_else(|| - error::Error::Input("Cannot purge custom database implementation".into()) - )?; + let db_path = database_config.path().ok_or_else(|| { + error::Error::Input("Cannot purge custom database implementation".into()) + })?; if !self.yes { print!("Are you sure to remove {:?}? 
[y/N]: ", &db_path); @@ -61,7 +64,7 @@ impl PurgeChainCmd { Some('y') | Some('Y') => {}, _ => { println!("Aborted"); - return Ok(()); + return Ok(()) }, } } diff --git a/substrate/client/cli/src/commands/revert_cmd.rs b/substrate/client/cli/src/commands/revert_cmd.rs index 2745ce2c652417b53bbadb90ace51e2cf9d207fd..9ad49a03aa5fd533f07cb28c88e24721091c5ae8 100644 --- a/substrate/client/cli/src/commands/revert_cmd.rs +++ b/substrate/client/cli/src/commands/revert_cmd.rs @@ -16,16 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; -use crate::params::{GenericNumber, PruningParams, SharedParams}; -use crate::CliConfiguration; +use crate::{ + error, + params::{GenericNumber, PruningParams, SharedParams}, + CliConfiguration, +}; +use sc_client_api::{Backend, UsageProvider}; use sc_service::chain_ops::revert_chain; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::fmt::Debug; -use std::str::FromStr; -use std::sync::Arc; +use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; -use sc_client_api::{Backend, UsageProvider}; /// The `revert` command used revert the chain to a previous state. #[derive(Debug, StructOpt)] @@ -45,11 +45,7 @@ pub struct RevertCmd { impl RevertCmd { /// Run the revert command - pub async fn run( - &self, - client: Arc, - backend: Arc, - ) -> error::Result<()> + pub async fn run(&self, client: Arc, backend: Arc) -> error::Result<()> where B: BlockT, BA: Backend, diff --git a/substrate/client/cli/src/commands/run_cmd.rs b/substrate/client/cli/src/commands/run_cmd.rs index 285ffc9fdca16a94845767e878e1daed16002a76..2b5a3632543bd0ee09e0ef070796df501a84209b 100644 --- a/substrate/client/cli/src/commands/run_cmd.rs +++ b/substrate/client/cli/src/commands/run_cmd.rs @@ -16,15 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::arg_enums::RpcMethods; -use crate::error::{Error, Result}; -use crate::params::ImportParams; -use crate::params::KeystoreParams; -use crate::params::NetworkParams; -use crate::params::OffchainWorkerParams; -use crate::params::SharedParams; -use crate::params::TransactionPoolParams; -use crate::CliConfiguration; +use crate::{ + arg_enums::RpcMethods, + error::{Error, Result}, + params::{ + ImportParams, KeystoreParams, NetworkParams, OffchainWorkerParams, SharedParams, + TransactionPoolParams, + }, + CliConfiguration, +}; use regex::Regex; use sc_service::{ config::{BasePath, PrometheusConfig, TransactionPoolOptions}, @@ -308,7 +308,7 @@ impl CliConfiguration for RunCmd { Error::Input(format!( "Invalid node name '{}'. Reason: {}. 
If unsure, use none.", name, msg - )) + )) })?; Ok(name) @@ -363,18 +363,13 @@ impl CliConfiguration for RunCmd { Ok(if self.no_prometheus { None } else { - let interface = if self.prometheus_external { - Ipv4Addr::UNSPECIFIED - } else { - Ipv4Addr::LOCALHOST - }; - - Some(PrometheusConfig::new_with_default_registry( - SocketAddr::new( - interface.into(), - self.prometheus_port.unwrap_or(default_listen_port), - ) - )) + let interface = + if self.prometheus_external { Ipv4Addr::UNSPECIFIED } else { Ipv4Addr::LOCALHOST }; + + Some(PrometheusConfig::new_with_default_registry(SocketAddr::new( + interface.into(), + self.prometheus_port.unwrap_or(default_listen_port), + ))) }) } @@ -416,7 +411,7 @@ impl CliConfiguration for RunCmd { self.rpc_external, self.unsafe_rpc_external, self.rpc_methods, - self.validator + self.validator, )?; Ok(Some(SocketAddr::new(interface, self.rpc_port.unwrap_or(default_listen_port)))) @@ -466,19 +461,19 @@ impl CliConfiguration for RunCmd { pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); + return Err("Node name too long") } let invalid_chars = r"[\\.@]"; let re = Regex::new(invalid_chars).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'"); + return Err("Node name should not contain invalid chars such as '.' and '@'") } let invalid_patterns = r"(https?:\\/+)?(www)+"; let re = Regex::new(invalid_patterns).unwrap(); if re.is_match(&name) { - return Err("Node name should not contain urls"); + return Err("Node name should not contain urls") } Ok(()) @@ -497,7 +492,7 @@ fn rpc_interface( or `--rpc-methods=unsafe` if you understand the risks. See the options \ description for more information." .to_owned(), - )); + )) } if is_external || is_unsafe_external { @@ -537,11 +532,10 @@ fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), Telem None => Err(TelemetryParsingError::MissingVerbosity), Some(pos_) => { let url = s[..pos_].to_string(); - let verbosity = s[pos_ + 1..] 
- .parse() - .map_err(TelemetryParsingError::VerbosityParsingError)?; + let verbosity = + s[pos_ + 1..].parse().map_err(TelemetryParsingError::VerbosityParsingError)?; Ok((url, verbosity)) - } + }, } } @@ -574,17 +568,13 @@ fn parse_cors(s: &str) -> std::result::Result> match part { "all" | "*" => { is_all = true; - break; - } + break + }, other => origins.push(other.to_owned()), } } - Ok(if is_all { - Cors::All - } else { - Cors::List(origins) - }) + Ok(if is_all { Cors::All } else { Cors::List(origins) }) } #[cfg(test)] @@ -600,7 +590,8 @@ mod tests { fn tests_node_name_bad() { assert!(is_node_name_valid( "very very long names are really not very cool for the ui at all, really they're not" - ).is_err()); + ) + .is_err()); assert!(is_node_name_valid("Dots.not.Ok").is_err()); assert!(is_node_name_valid("http://visit.me").is_err()); assert!(is_node_name_valid("https://visit.me").is_err()); diff --git a/substrate/client/cli/src/commands/sign.rs b/substrate/client/cli/src/commands/sign.rs index 5d487861428fd5b7e5872eacd762d3a534667f44..20aacd9bf00206d0395bc248f06b4368ebf95165 100644 --- a/substrate/client/cli/src/commands/sign.rs +++ b/substrate/client/cli/src/commands/sign.rs @@ -5,7 +5,7 @@ // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -18,15 +18,12 @@ //! Implementation of the `sign` subcommand use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag, KeystoreParams}; -use structopt::StructOpt; use sp_core::crypto::SecretString; +use structopt::StructOpt; /// The `sign` command #[derive(Debug, StructOpt, Clone)] -#[structopt( - name = "sign", - about = "Sign a message, with a given (secret) key" -)] +#[structopt(name = "sign", about = "Sign a message, with a given (secret) key")] pub struct SignCmd { /// The secret key URI. /// If the value is a file, the file content is used as URI. @@ -52,7 +49,6 @@ pub struct SignCmd { pub crypto_scheme: CryptoSchemeFlag, } - impl SignCmd { /// Run the command pub fn run(&self) -> error::Result<()> { @@ -60,17 +56,19 @@ impl SignCmd { let suri = utils::read_uri(self.suri.as_ref())?; let password = self.keystore_params.read_password()?; - let signature = with_crypto_scheme!( - self.crypto_scheme.scheme, - sign(&suri, password, message) - )?; + let signature = + with_crypto_scheme!(self.crypto_scheme.scheme, sign(&suri, password, message))?; println!("{}", signature); Ok(()) } } -fn sign(suri: &str, password: Option, message: Vec) -> error::Result { +fn sign( + suri: &str, + password: Option, + message: Vec, +) -> error::Result { let pair = utils::pair_from_suri::
<P>
(suri, password)?; Ok(format!("{}", hex::encode(pair.sign(&message)))) } @@ -91,7 +89,7 @@ mod test { "--message", &seed[2..], "--password", - "12345" + "12345", ]); assert!(sign.run().is_ok()); } diff --git a/substrate/client/cli/src/commands/utils.rs b/substrate/client/cli/src/commands/utils.rs index 69372e624095ea6e851b5fbe56b27bb50b49e72a..fa783f7a95a5186838cee682ed2f326cba3a9cf1 100644 --- a/substrate/client/cli/src/commands/utils.rs +++ b/substrate/client/cli/src/commands/utils.rs @@ -22,9 +22,8 @@ use crate::{ OutputType, }; use serde_json::json; -use sp_core::crypto::{ExposeSecret, SecretString, Zeroize}; use sp_core::{ - crypto::{Ss58AddressFormat, Ss58Codec}, + crypto::{ExposeSecret, SecretString, Ss58AddressFormat, Ss58Codec, Zeroize}, hexdisplay::HexDisplay, Pair, }; @@ -88,7 +87,7 @@ pub fn print_from_uri( "{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed") ); - } + }, OutputType::Text => { println!( "Secret phrase `{}` is account:\n \ @@ -102,12 +101,9 @@ pub fn print_from_uri( format_public_key::(public_key.clone()), public_key.to_ss58check_with_version(network_override), format_account_id::(public_key), - pair.public() - .into() - .into_account() - .to_ss58check_with_version(network_override), + pair.public().into().into_account().to_ss58check_with_version(network_override), ); - } + }, } } else if let Ok((pair, seed)) = Pair::from_string_with_seed(uri, password.clone()) { let public_key = pair.public(); @@ -127,7 +123,7 @@ pub fn print_from_uri( "{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed") ); - } + }, OutputType::Text => { println!( "Secret Key URI `{}` is account:\n \ @@ -137,20 +133,13 @@ pub fn print_from_uri( Account ID: {}\n \ SS58 Address: {}", uri, - if let Some(seed) = seed { - format_seed::(seed) - } else { - "n/a".into() - }, + if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, format_public_key::(public_key.clone()), public_key.to_ss58check_with_version(network_override), format_account_id::(public_key), - pair.public() - .into() - .into_account() - .to_ss58check_with_version(network_override), + pair.public().into().into_account().to_ss58check_with_version(network_override), ); - } + }, } } else if let Ok((public_key, network)) = Pair::Public::from_string_with_version(uri) { let network_override = network_override.unwrap_or(network); @@ -170,7 +159,7 @@ pub fn print_from_uri( "{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed") ); - } + }, OutputType::Text => { println!( "Public Key URI `{}` is account:\n \ @@ -186,7 +175,7 @@ pub fn print_from_uri( format_account_id::(public_key.clone()), public_key.to_ss58check_with_version(network_override), ); - } + }, } } else { println!("Invalid phrase/URI given"); @@ -220,11 +209,8 @@ where "ss58Address": public_key.to_ss58check_with_version(network_override), }); - println!( - "{}", - serde_json::to_string_pretty(&json).expect("Json pretty print failed") - ); - } + println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); + }, OutputType::Text => { println!( "Network ID/version: {}\n \ @@ -238,7 +224,7 @@ where format_account_id::(public_key.clone()), public_key.to_ss58check_with_version(network_override), ); - } + }, } Ok(()) @@ -273,10 +259,7 @@ fn format_account_id(public_key: PublicFor
<P>
) -> String where PublicFor
<P>
: Into, { - format!( - "0x{}", - HexDisplay::from(&public_key.into().into_account().as_ref()) - ) + format!("0x{}", HexDisplay::from(&public_key.into().into_account().as_ref())) } /// helper method for decoding hex @@ -294,13 +277,13 @@ pub fn read_message(msg: Option<&String>, should_decode: bool) -> Result match msg { Some(m) => { message = decode_hex(m)?; - } + }, None => { std::io::stdin().lock().read_to_end(&mut message)?; if should_decode { message = decode_hex(&message)?; } - } + }, } Ok(message) } diff --git a/substrate/client/cli/src/commands/vanity.rs b/substrate/client/cli/src/commands/vanity.rs index ce1f079db8789ce40bdf0c2a5154d887cb115c73..daeb81e86a1a18d1b593c8770f5d8ed7963f5191 100644 --- a/substrate/client/cli/src/commands/vanity.rs +++ b/substrate/client/cli/src/commands/vanity.rs @@ -5,7 +5,7 @@ // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -19,21 +19,17 @@ //! implementation of the `vanity` subcommand use crate::{ - error, utils, with_crypto_scheme, - CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, + error, utils, with_crypto_scheme, CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, }; -use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; -use structopt::StructOpt; use rand::{rngs::OsRng, RngCore}; +use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::IdentifyAccount; +use structopt::StructOpt; use utils::print_from_uri; /// The `vanity` command #[derive(Debug, StructOpt, Clone)] -#[structopt( - name = "vanity", - about = "Generate a seed that provides a vanity address" -)] +#[structopt(name = "vanity", about = "Generate a seed that provides a vanity address")] pub struct VanityCmd { /// Desired pattern #[structopt(long, parse(try_from_str = assert_non_empty_string))] @@ -78,10 +74,10 @@ fn generate_key( desired: &str, network_override: Ss58AddressFormat, ) -> Result - where - Pair: sp_core::Pair, - Pair::Public: IdentifyAccount, - ::AccountId: Ss58Codec, +where + Pair: sp_core::Pair, + Pair::Public: IdentifyAccount, + ::AccountId: Ss58Codec, { println!("Generating key containing pattern '{}'", desired); @@ -104,7 +100,7 @@ fn generate_key( best = score; if best >= top { println!("best: {} == top: {}", best, top); - return Ok(utils::format_seed::(seed.clone())); + return Ok(utils::format_seed::(seed.clone())) } } done += 1; @@ -129,11 +125,11 @@ fn next_seed(seed: &mut [u8]) { match seed[i] { 255 => { seed[i] = 0; - } + }, _ => { seed[i] += 1; - break; - } + break + }, } } } @@ -145,7 +141,7 @@ fn calculate_score(_desired: &str, key: &str) -> usize { let snip_size = _desired.len() - truncate; let truncated = &_desired[0..snip_size]; if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48); + return (47 - pos) + (snip_size * 48) } } 0 @@ -160,15 +156,13 @@ fn assert_non_empty_string(pattern: &str) -> Result { } } - #[cfg(test)] mod tests { use super::*; - use sp_core::{crypto::Ss58Codec, Pair}; - use sp_core::sr25519; + use sp_core::{crypto::Ss58Codec, sr25519, Pair}; + use structopt::StructOpt; #[cfg(feature = "bench")] use test::Bencher; - use structopt::StructOpt; #[test] fn vanity() { @@ -179,25 +173,21 @@ mod tests { #[test] fn test_generation_with_single_char() { let 
seed = generate_key::("ab", Default::default()).unwrap(); - assert!( - sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) - .unwrap() - .public() - .to_ss58check() - .contains("ab") - ); + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check() + .contains("ab")); } #[test] fn generate_key_respects_network_override() { let seed = generate_key::("ab", Ss58AddressFormat::PolkadotAccount).unwrap(); - assert!( - sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) - .unwrap() - .public() - .to_ss58check_with_version(Ss58AddressFormat::PolkadotAccount) - .contains("ab") - ); + assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) + .unwrap() + .public() + .to_ss58check_with_version(Ss58AddressFormat::PolkadotAccount) + .contains("ab")); } #[test] @@ -208,10 +198,7 @@ mod tests { #[test] fn test_score_100() { - let score = calculate_score( - "Polkadot", - "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim", - ); + let score = calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); assert_eq!(score, 430); } @@ -219,10 +206,7 @@ mod tests { fn test_score_50_2() { // 50% for the position + 50% for the size assert_eq!( - calculate_score( - "Polkadot", - "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim" - ), + calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 238 ); } @@ -230,10 +214,7 @@ mod tests { #[test] fn test_score_0() { assert_eq!( - calculate_score( - "Polkadot", - "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK" - ), + calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK"), 0 ); } diff --git a/substrate/client/cli/src/commands/verify.rs b/substrate/client/cli/src/commands/verify.rs index c6ce3ef9d69c8855f43c92a83d6c3ed8af827381..760793374242e792cefe2d48627323e2208f89b3 100644 --- a/substrate/client/cli/src/commands/verify.rs +++ b/substrate/client/cli/src/commands/verify.rs @@ -5,7 +5,7 @@ // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or +// the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, @@ -19,7 +19,7 @@ //! implementation of the `verify` subcommand use crate::{error, utils, with_crypto_scheme, CryptoSchemeFlag}; -use sp_core::{Public, crypto::Ss58Codec}; +use sp_core::{crypto::Ss58Codec, Public}; use structopt::StructOpt; /// The `verify` command @@ -57,32 +57,23 @@ impl VerifyCmd { let message = utils::read_message(self.message.as_ref(), self.hex)?; let sig_data = utils::decode_hex(&self.sig)?; let uri = utils::read_uri(self.uri.as_ref())?; - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - &uri - }; - - with_crypto_scheme!( - self.crypto_scheme.scheme, - verify(sig_data, message, uri) - ) + let uri = if uri.starts_with("0x") { &uri[2..] 
} else { &uri }; + + with_crypto_scheme!(self.crypto_scheme.scheme, verify(sig_data, message, uri)) } } fn verify(sig_data: Vec, message: Vec, uri: &str) -> error::Result<()> - where - Pair: sp_core::Pair, - Pair::Signature: Default + AsMut<[u8]>, +where + Pair: sp_core::Pair, + Pair::Signature: Default + AsMut<[u8]>, { let mut signature = Pair::Signature::default(); if sig_data.len() != signature.as_ref().len() { - return Err( - error::Error::SignatureInvalidLength { - read: sig_data.len(), - expected: signature.as_ref().len(), - } - ); + return Err(error::Error::SignatureInvalidLength { + read: sig_data.len(), + expected: signature.as_ref().len(), + }) } signature.as_mut().copy_from_slice(&sig_data); diff --git a/substrate/client/cli/src/config.rs b/substrate/client/cli/src/config.rs index 8e435da253c04cbf2fbc1bc8421f9a4eaa66319c..d586156410507b9926910a0da49d47ee7b3eef9b 100644 --- a/substrate/client/cli/src/config.rs +++ b/substrate/client/cli/src/config.rs @@ -18,24 +18,24 @@ //! Configuration trait for a CLI based on substrate -use crate::arg_enums::Database; -use crate::error::Result; use crate::{ - DatabaseParams, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, - OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, + arg_enums::Database, error::Result, DatabaseParams, ImportParams, KeystoreParams, + NetworkParams, NodeKeyParams, OffchainWorkerParams, PruningParams, SharedParams, SubstrateCli, }; use log::warn; use names::{Generator, Name}; use sc_client_api::execution_extensions::ExecutionStrategies; -use sc_service::config::{ - BasePath, Configuration, DatabaseConfig, ExtTransport, KeystoreConfig, NetworkConfiguration, - NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, Role, RpcMethods, - TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, +use sc_service::{ + config::{ + BasePath, Configuration, DatabaseConfig, ExtTransport, KeystoreConfig, + NetworkConfiguration, NodeKeyConfig, OffchainWorkerConfig, PrometheusConfig, PruningMode, + Role, RpcMethods, TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, + WasmExecutionMethod, + }, + ChainSpec, KeepBlocks, TracingReceiver, TransactionStorageMode, }; -use sc_service::{ChainSpec, TracingReceiver, KeepBlocks, TransactionStorageMode}; use sc_tracing::logging::LoggerBuilder; -use std::net::SocketAddr; -use std::path::PathBuf; +use std::{net::SocketAddr, path::PathBuf}; /// The maximum number of characters for a node name. pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; @@ -178,12 +178,7 @@ pub trait CliConfiguration: Sized { default_listen_port, ) } else { - NetworkConfiguration::new( - node_name, - client_id, - node_key, - Some(net_config_dir), - ) + NetworkConfiguration::new(node_name, client_id, node_key, Some(net_config_dir)) }) } @@ -201,14 +196,13 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `DatabaseParams` if it is available. Otherwise its `None`. fn database_cache_size(&self) -> Result> { - Ok(self.database_params() - .map(|x| x.database_cache_size()) - .unwrap_or_default()) + Ok(self.database_params().map(|x| x.database_cache_size()).unwrap_or_default()) } /// Get the database transaction storage scheme. 
fn database_transaction_storage(&self) -> Result { - Ok(self.database_params() + Ok(self + .database_params() .map(|x| x.transaction_storage()) .unwrap_or(TransactionStorageMode::BlockBody)) } @@ -228,13 +222,8 @@ pub trait CliConfiguration: Sized { database: Database, ) -> Result { Ok(match database { - Database::RocksDb => DatabaseConfig::RocksDb { - path: base_path.join("db"), - cache_size, - }, - Database::ParityDb => DatabaseConfig::ParityDb { - path: base_path.join("paritydb"), - }, + Database::RocksDb => DatabaseConfig::RocksDb { path: base_path.join("db"), cache_size }, + Database::ParityDb => DatabaseConfig::ParityDb { path: base_path.join("paritydb") }, }) } @@ -242,9 +231,7 @@ pub trait CliConfiguration: Sized { /// /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. fn state_cache_size(&self) -> Result { - Ok(self.import_params() - .map(|x| x.state_cache_size()) - .unwrap_or_default()) + Ok(self.import_params().map(|x| x.state_cache_size()).unwrap_or_default()) } /// Get the state cache child ratio (if any). @@ -293,18 +280,14 @@ pub trait CliConfiguration: Sized { /// By default this is retrieved from `ImportParams` if it is available. Otherwise its /// `WasmExecutionMethod::default()`. fn wasm_method(&self) -> Result { - Ok(self.import_params() - .map(|x| x.wasm_method()) - .unwrap_or_default()) + Ok(self.import_params().map(|x| x.wasm_method()).unwrap_or_default()) } /// Get the path where WASM overrides live. /// /// By default this is `None`. fn wasm_runtime_overrides(&self) -> Option { - self.import_params() - .map(|x| x.wasm_runtime_overrides()) - .unwrap_or_default() + self.import_params().map(|x| x.wasm_runtime_overrides()).unwrap_or_default() } /// Get the execution strategies. @@ -502,10 +485,7 @@ pub trait CliConfiguration: Sized { let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; let telemetry_endpoints = self.telemetry_endpoints(&chain_spec)?; - let unsafe_pruning = self - .import_params() - .map(|p| p.unsafe_pruning) - .unwrap_or(false); + let unsafe_pruning = self.import_params().map(|p| p.unsafe_pruning).unwrap_or(false); Ok(Configuration { impl_name: C::impl_name(), @@ -628,7 +608,7 @@ pub fn generate_node_name() -> String { let count = node_name.chars().count(); if count < NODE_NAME_MAX_LENGTH { - return node_name; + return node_name } } } diff --git a/substrate/client/cli/src/lib.rs b/substrate/client/cli/src/lib.rs index e170d1a196ffcea5263209ea4743c9848e4329c6..0d5051bc113e25a185e48c3394938d98e23d4a71 100644 --- a/substrate/client/cli/src/lib.rs +++ b/substrate/client/cli/src/lib.rs @@ -159,7 +159,7 @@ pub trait SubstrateCli: Sized { let _ = std::io::stdout().write_all(e.message.as_bytes()); std::process::exit(0); } - } + }, }; ::from_clap(&matches) diff --git a/substrate/client/cli/src/params/database_params.rs b/substrate/client/cli/src/params/database_params.rs index d468f155555624f372b5bb81ea178aaa32c9d59c..4d6cf5f1d3674e086fb166a81977cd85479e1e98 100644 --- a/substrate/client/cli/src/params/database_params.rs +++ b/substrate/client/cli/src/params/database_params.rs @@ -17,8 +17,8 @@ // along with this program. If not, see . use crate::arg_enums::Database; -use structopt::StructOpt; use sc_service::TransactionStorageMode; +use structopt::StructOpt; /// Parameters for block import. 
#[derive(Debug, StructOpt, Clone)] diff --git a/substrate/client/cli/src/params/import_params.rs b/substrate/client/cli/src/params/import_params.rs index a62ec98a97029f98630669727fcf03d18765dbd8..9248e210eb6628ccfa18c9c4eccacf4e3873ec2a 100644 --- a/substrate/client/cli/src/params/import_params.rs +++ b/substrate/client/cli/src/params/import_params.rs @@ -16,16 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::arg_enums::{ - ExecutionStrategy, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, - DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, - DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, +use crate::{ + arg_enums::{ + ExecutionStrategy, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_IMPORT_BLOCK_VALIDATOR, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, + }, + params::{DatabaseParams, PruningParams}, }; -use crate::params::DatabaseParams; -use crate::params::PruningParams; use sc_client_api::execution_extensions::ExecutionStrategies; -use structopt::StructOpt; use std::path::PathBuf; +use structopt::StructOpt; #[cfg(feature = "wasmtime")] const WASM_METHOD_DEFAULT: &str = "Compiled"; @@ -73,11 +74,7 @@ pub struct ImportParams { pub execution_strategies: ExecutionStrategiesParams, /// Specify the state cache size. - #[structopt( - long = "state-cache-size", - value_name = "Bytes", - default_value = "67108864" - )] + #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] pub state_cache_size: usize, } @@ -102,11 +99,7 @@ impl ImportParams { pub fn execution_strategies(&self, is_dev: bool, is_validator: bool) -> ExecutionStrategies { let exec = &self.execution_strategies; let exec_all_or = |strat: Option, default: ExecutionStrategy| { - let default = if is_dev { - ExecutionStrategy::Native - } else { - default - }; + let default = if is_dev { ExecutionStrategy::Native } else { default }; exec.execution.unwrap_or_else(|| strat.unwrap_or(default)).into() }; @@ -120,10 +113,14 @@ impl ImportParams { ExecutionStrategies { syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), importing: exec_all_or(exec.execution_import_block, default_execution_import_block), - block_construction: - exec_all_or(exec.execution_block_construction, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION), - offchain_worker: - exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), + block_construction: exec_all_or( + exec.execution_block_construction, + DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ), + offchain_worker: exec_all_or( + exec.execution_offchain_worker, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, + ), other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), } } diff --git a/substrate/client/cli/src/params/keystore_params.rs b/substrate/client/cli/src/params/keystore_params.rs index 2975c9bf5041f0df92e5b1b390112956c1984248..4eb5e5dc6c2d27435d0b81aff7415063face13ec 100644 --- a/substrate/client/cli/src/params/keystore_params.rs +++ b/substrate/client/cli/src/params/keystore_params.rs @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::error::Result; +use crate::{error, error::Result}; use sc_service::config::KeystoreConfig; -use std::{fs, path::{PathBuf, Path}}; -use structopt::StructOpt; -use crate::error; use sp_core::crypto::SecretString; +use std::{ + fs, + path::{Path, PathBuf}, +}; +use structopt::StructOpt; /// default sub directory for the key store const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; @@ -81,8 +83,7 @@ impl KeystoreParams { #[cfg(target_os = "unknown")] None } else if let Some(ref file) = self.password_filename { - let password = fs::read_to_string(file) - .map_err(|e| format!("{}", e))?; + let password = fs::read_to_string(file).map_err(|e| format!("{}", e))?; Some(SecretString::new(password)) } else { self.password.clone() diff --git a/substrate/client/cli/src/params/mod.rs b/substrate/client/cli/src/params/mod.rs index 0769e5a87adcb2245173c9cd26cb3ae021704bd0..431e1750b2b8acde18f709a9151e854d29bfb678 100644 --- a/substrate/client/cli/src/params/mod.rs +++ b/substrate/client/cli/src/params/mod.rs @@ -25,21 +25,20 @@ mod pruning_params; mod shared_params; mod transaction_pool_params; -use std::{fmt::Debug, str::FromStr, convert::TryFrom}; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, NumberFor}}; +use crate::arg_enums::{CryptoScheme, OutputType}; use sp_core::crypto::Ss58AddressFormat; -use crate::arg_enums::{OutputType, CryptoScheme}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; +use std::{convert::TryFrom, fmt::Debug, str::FromStr}; use structopt::StructOpt; -pub use crate::params::database_params::*; -pub use crate::params::import_params::*; -pub use crate::params::keystore_params::*; -pub use crate::params::network_params::*; -pub use crate::params::node_key_params::*; -pub use crate::params::offchain_worker_params::*; -pub use crate::params::pruning_params::*; -pub use crate::params::shared_params::*; -pub use crate::params::transaction_pool_params::*; +pub use crate::params::{ + database_params::*, import_params::*, keystore_params::*, network_params::*, + node_key_params::*, offchain_worker_params::*, pruning_params::*, shared_params::*, + transaction_pool_params::*, +}; /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. #[derive(Debug, Clone)] @@ -50,10 +49,7 @@ impl FromStr for GenericNumber { fn from_str(block_number: &str) -> Result { if let Some(pos) = block_number.chars().position(|d| !d.is_digit(10)) { - Err(format!( - "Expected block number, found illegal digit at position: {}", - pos, - )) + Err(format!("Expected block number, found illegal digit at position: {}", pos,)) } else { Ok(Self(block_number.to_owned())) } @@ -66,9 +62,9 @@ impl GenericNumber { /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate /// documentation. pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, + where + N: FromStr, + N::Err: std::fmt::Debug, { FromStr::from_str(&self.0).map_err(|e| format!("Failed to parse block number: {:?}", e)) } @@ -109,7 +105,7 @@ impl BlockNumberOrHash { if self.0.starts_with("0x") { Ok(BlockId::Hash( FromStr::from_str(&self.0[2..]) - .map_err(|e| format!("Failed to parse block hash: {:?}", e))? 
+ .map_err(|e| format!("Failed to parse block hash: {:?}", e))?, )) } else { GenericNumber(self.0.clone()).parse().map(BlockId::Number) @@ -117,7 +113,6 @@ impl BlockNumberOrHash { } } - /// Optional flag for specifying crypto algorithm #[derive(Debug, StructOpt, Clone)] pub struct CryptoSchemeFlag { diff --git a/substrate/client/cli/src/params/network_params.rs b/substrate/client/cli/src/params/network_params.rs index 69f4c9d1ba74bba06d211052bd018ed8cbd35e0c..185a93f66b3d2f2b50cf59b64154cf30d5645e0b 100644 --- a/substrate/client/cli/src/params/network_params.rs +++ b/substrate/client/cli/src/params/network_params.rs @@ -16,13 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::params::node_key_params::NodeKeyParams; -use crate::arg_enums::SyncMode; +use crate::{arg_enums::SyncMode, params::node_key_params::NodeKeyParams}; use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig}, + config::{ + NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, SetConfig, TransportConfig, + }, multiaddr::Protocol, }; -use sc_service::{ChainSpec, ChainType, config::{Multiaddr, MultiaddrWithPeerId}}; +use sc_service::{ + config::{Multiaddr, MultiaddrWithPeerId}, + ChainSpec, ChainType, +}; use std::{borrow::Cow, path::PathBuf}; use structopt::StructOpt; @@ -97,11 +101,7 @@ pub struct NetworkParams { /// /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. - #[structopt( - long = "max-parallel-downloads", - value_name = "COUNT", - default_value = "5" - )] + #[structopt(long = "max-parallel-downloads", value_name = "COUNT", default_value = "5")] pub max_parallel_downloads: u32, #[allow(missing_docs)] @@ -184,15 +184,16 @@ impl NetworkParams { let chain_type = chain_spec.chain_type(); // Activate if the user explicitly requested local discovery, `--dev` is given or the // chain type is `Local`/`Development` - let allow_non_globals_in_dht = self.discover_local - || is_dev - || matches!(chain_type, ChainType::Local | ChainType::Development); + let allow_non_globals_in_dht = + self.discover_local || + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); let allow_private_ipv4 = match (self.allow_private_ipv4, self.no_private_ipv4) { (true, true) => unreachable!("`*_private_ipv4` flags are mutually exclusive; qed"), (true, false) => true, (false, true) => false, - (false, false) => is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), + (false, false) => + is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), }; NetworkConfiguration { diff --git a/substrate/client/cli/src/params/node_key_params.rs b/substrate/client/cli/src/params/node_key_params.rs index d5823341aa69208e6e81c43c46589aac2e383320..bc5606752a88fe0369dd8f87ef86237606d39282 100644 --- a/substrate/client/cli/src/params/node_key_params.rs +++ b/substrate/client/cli/src/params/node_key_params.rs @@ -16,13 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use sc_network::{config::identity::ed25519, config::NodeKeyConfig}; +use sc_network::config::{identity::ed25519, NodeKeyConfig}; use sp_core::H256; use std::{path::PathBuf, str::FromStr}; use structopt::StructOpt; -use crate::arg_enums::NodeKeyType; -use crate::error; +use crate::{arg_enums::NodeKeyType, error}; /// The file name of the node's Ed25519 secret key inside the chain-specific /// network config directory, if neither `--node-key` nor `--node-key-file` @@ -103,12 +102,12 @@ impl NodeKeyParams { sc_network::config::Secret::File( self.node_key_file .clone() - .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)) + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)), ) }; NodeKeyConfig::Ed25519(secret) - } + }, }) } } @@ -120,13 +119,11 @@ fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. fn parse_ed25519_secret(hex: &str) -> error::Result { - H256::from_str(&hex) - .map_err(invalid_node_key) - .and_then(|bytes| { - ed25519::SecretKey::from_bytes(bytes) - .map(sc_network::config::Secret::Input) - .map_err(invalid_node_key) - }) + H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes| { + ed25519::SecretKey::from_bytes(bytes) + .map(sc_network::config::Secret::Input) + .map_err(invalid_node_key) + }) } #[cfg(test)] @@ -151,9 +148,7 @@ mod tests { params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() => - { - Ok(()) - } + Ok(()), _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) @@ -171,14 +166,14 @@ mod tests { node_key_file: Some(file), }; - let node_key = params.node_key(&PathBuf::from("not-used")) + let node_key = params + .node_key(&PathBuf::from("not-used")) .expect("Creates node key config") .into_keypair() .expect("Creates node key pair"); match node_key { - Keypair::Ed25519(ref pair) - if pair.secret().as_ref() == key.as_ref() => {} + Keypair::Ed25519(ref pair) if pair.secret().as_ref() == key.as_ref() => {}, _ => panic!("Invalid key"), } } @@ -202,11 +197,7 @@ mod tests { { NodeKeyType::variants().iter().try_for_each(|t| { let node_key_type = NodeKeyType::from_str(t).unwrap(); - f(NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: None, - }) + f(NodeKeyParams { node_key_type, node_key: None, node_key_file: None }) }) } @@ -214,17 +205,12 @@ mod tests { with_def_params(|params| { let dir = PathBuf::from(net_config_dir.clone()); let typ = params.node_key_type; - params - .node_key(net_config_dir) - .and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 - && f == &dir.join(NODE_KEY_ED25519_FILE) => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) + params.node_key(net_config_dir).and_then(move |c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => + Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) }) } diff --git a/substrate/client/cli/src/params/offchain_worker_params.rs b/substrate/client/cli/src/params/offchain_worker_params.rs index a6d65e4027a25ea2eae809b6f6729945a30d7524..685328ef17795e73aa7515874b7393bfd6955283 100644 --- a/substrate/client/cli/src/params/offchain_worker_params.rs +++ 
b/substrate/client/cli/src/params/offchain_worker_params.rs @@ -27,8 +27,7 @@ use sc_network::config::Role; use sc_service::config::OffchainWorkerConfig; use structopt::StructOpt; -use crate::error; -use crate::OffchainWorkerEnabled; +use crate::{error, OffchainWorkerEnabled}; /// Offchain worker related parameters. #[derive(Debug, StructOpt, Clone)] @@ -49,10 +48,7 @@ pub struct OffchainWorkerParams { /// /// Enables a runtime to write directly to a offchain workers /// DB during block import. - #[structopt( - long = "enable-offchain-indexing", - value_name = "ENABLE_OFFCHAIN_INDEXING" - )] + #[structopt(long = "enable-offchain-indexing", value_name = "ENABLE_OFFCHAIN_INDEXING")] pub indexing_enabled: bool, } @@ -67,9 +63,6 @@ impl OffchainWorkerParams { }; let indexing_enabled = self.indexing_enabled; - Ok(OffchainWorkerConfig { - enabled, - indexing_enabled, - }) + Ok(OffchainWorkerConfig { enabled, indexing_enabled }) } } diff --git a/substrate/client/cli/src/params/pruning_params.rs b/substrate/client/cli/src/params/pruning_params.rs index 32abaa9a755b4c2aac7a82e55135051de2cec89c..28c7fa301cc6062bb37e35c665b1bef95224622f 100644 --- a/substrate/client/cli/src/params/pruning_params.rs +++ b/substrate/client/cli/src/params/pruning_params.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::error; -use sc_service::{PruningMode, Role, KeepBlocks}; +use sc_service::{KeepBlocks, PruningMode, Role}; use structopt::StructOpt; /// Parameters to define the pruning mode @@ -54,13 +54,13 @@ impl PruningParams { "Validators should run with state pruning disabled (i.e. archive). \ You can ignore this check with `--unsafe-pruning`." .to_string(), - )); + )) } PruningMode::keep_blocks(s.parse().map_err(|_| { error::Error::Input("Invalid pruning mode specified".to_string()) })?) - } + }, }) } diff --git a/substrate/client/cli/src/params/shared_params.rs b/substrate/client/cli/src/params/shared_params.rs index c0317c280a9d04a145002a9124ef2e6a9a37a061..5ded5846e34c3c1cd12d29d5f0d2bd8b22e1c95d 100644 --- a/substrate/client/cli/src/params/shared_params.rs +++ b/substrate/client/cli/src/params/shared_params.rs @@ -16,10 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::arg_enums::TracingReceiver; use sc_service::config::BasePath; use std::path::PathBuf; use structopt::StructOpt; -use crate::arg_enums::TracingReceiver; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt, Clone)] @@ -88,13 +88,12 @@ impl SharedParams { pub fn chain_id(&self, is_dev: bool) -> String { match self.chain { Some(ref chain) => chain.clone(), - None => { + None => if is_dev { "dev".into() } else { "".into() - } - } + }, } } diff --git a/substrate/client/cli/src/runner.rs b/substrate/client/cli/src/runner.rs index 947cdd5a21e504dfac6323b0da49afd8121ab2c2..f305f8cbbeaffe51a76403f700724cea992b2be7 100644 --- a/substrate/client/cli/src/runner.rs +++ b/substrate/client/cli/src/runner.rs @@ -16,19 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::CliConfiguration; -use crate::Result; -use crate::SubstrateCli; +use crate::{error::Error as CliError, CliConfiguration, Result, SubstrateCli}; use chrono::prelude::*; -use futures::pin_mut; -use futures::select; -use futures::{future, future::FutureExt, Future}; +use futures::{future, future::FutureExt, pin_mut, select, Future}; use log::info; -use sc_service::{Configuration, TaskType, TaskManager}; +use sc_service::{Configuration, Error as ServiceError, TaskManager, TaskType}; use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; use std::marker::PhantomData; -use sc_service::Error as ServiceError; -use crate::error::Error as CliError; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), E> @@ -119,27 +113,19 @@ pub struct Runner { impl Runner { /// Create a new runtime with the command provided in argument - pub fn new( - cli: &C, - command: &T, - ) -> Result> { + pub fn new(cli: &C, command: &T) -> Result> { let tokio_runtime = build_runtime()?; let runtime_handle = tokio_runtime.handle().clone(); - let task_executor = move |fut, task_type| { - match task_type { - TaskType::Async => runtime_handle.spawn(fut).map(drop), - TaskType::Blocking => - runtime_handle.spawn_blocking(move || futures::executor::block_on(fut)) - .map(drop), - } + let task_executor = move |fut, task_type| match task_type { + TaskType::Async => runtime_handle.spawn(fut).map(drop), + TaskType::Blocking => runtime_handle + .spawn_blocking(move || futures::executor::block_on(fut)) + .map(drop), }; Ok(Runner { - config: command.create_configuration( - cli, - task_executor.into(), - )?, + config: command.create_configuration(cli, task_executor.into())?, tokio_runtime, phantom: PhantomData, }) @@ -183,7 +169,7 @@ impl Runner { /// A helper function that runs a command with the configuration of this node. pub fn sync_run( self, - runner: impl FnOnce(Configuration) -> std::result::Result<(), E> + runner: impl FnOnce(Configuration) -> std::result::Result<(), E>, ) -> std::result::Result<(), E> where E: std::error::Error + Send + Sync + 'static + From, @@ -194,7 +180,8 @@ impl Runner { /// A helper function that runs a future with tokio and stops if the process receives /// the signal `SIGTERM` or `SIGINT`. 
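
The reflowed `task_executor` closure in `Runner::new` above keeps its two dispatch paths intact: async tasks are spawned straight onto the tokio runtime, while blocking ones move to the blocking pool and are driven to completion there with `block_on`, so they cannot park a reactor thread. A self-contained sketch of that shape, assuming tokio and futures as dependencies and simplified to fire-and-forget:

use std::future::Future;

enum TaskKind {
    Async,
    Blocking,
}

fn dispatch<F>(handle: &tokio::runtime::Handle, fut: F, kind: TaskKind)
where
    F: Future<Output = ()> + Send + 'static,
{
    match kind {
        // Plain futures run on the runtime's worker threads.
        TaskKind::Async => drop(handle.spawn(fut)),
        // Blocking work is parked on the dedicated blocking pool so it
        // cannot stall the async workers.
        TaskKind::Blocking =>
            drop(handle.spawn_blocking(move || futures::executor::block_on(fut))),
    }
}
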
pub fn async_run( - self, runner: impl FnOnce(Configuration) -> std::result::Result<(F, TaskManager), E>, + self, + runner: impl FnOnce(Configuration) -> std::result::Result<(F, TaskManager), E>, ) -> std::result::Result<(), E> where F: Future>, @@ -219,19 +206,17 @@ impl Runner { pub fn print_node_infos(config: &Configuration) { info!("{}", C::impl_name()); info!("✌️ version {}", C::impl_version()); - info!( - "❤️ by {}, {}-{}", - C::author(), - C::copyright_start_year(), - Local::today().year(), - ); + info!("❤️ by {}, {}-{}", C::author(), C::copyright_start_year(), Local::today().year(),); info!("📋 Chain specification: {}", config.chain_spec.name()); info!("🏷 Node name: {}", config.network.node_name); info!("👤 Role: {}", config.display_role()); - info!("💾 Database: {} at {}", - config.database, - config.database.path().map_or_else(|| "".to_owned(), |p| p.display().to_string()) + info!( + "💾 Database: {} at {}", + config.database, + config + .database + .path() + .map_or_else(|| "".to_owned(), |p| p.display().to_string()) ); info!("⛓ Native runtime: {}", C::native_runtime_version(&config.chain_spec)); } - diff --git a/substrate/client/consensus/aura/src/import_queue.rs b/substrate/client/consensus/aura/src/import_queue.rs index c3faa5382686e9aed0d449b27c18cc2909f3343b..a8036f28f164849eb7b6ab7aaee5f80f6733cb06 100644 --- a/substrate/client/consensus/aura/src/import_queue.rs +++ b/substrate/client/consensus/aura/src/import_queue.rs @@ -18,36 +18,37 @@ //! Module implementing the logic for verifying and importing AuRa blocks. -use crate::{AuthorityId, find_pre_digest, slot_author, aura_err, Error, authorities}; -use std::{ - sync::Arc, marker::PhantomData, hash::Hash, fmt::Debug, -}; +use crate::{aura_err, authorities, find_pre_digest, slot_author, AuthorityId, Error}; +use codec::{Codec, Decode, Encode}; use log::{debug, info, trace}; use prometheus_endpoint::Registry; -use codec::{Encode, Decode, Codec}; +use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProviderExt}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + HeaderBackend, ProvideCache, +}; use sp_consensus::{ - BlockImport, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, - import_queue::{ - Verifier, BasicQueue, DefaultImportQueue, BoxJustificationImport, - }, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, + BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Error as ConsensusError, + ForkChoiceStrategy, }; -use sc_client_api::{BlockOf, UsageProvider, backend::AuxStore}; -use sp_blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, ProvideCache, HeaderBackend}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, Justifications}; -use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor}; -use sp_api::ProvideRuntimeApi; +use sp_consensus_aura::{ + digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, + AURA_ENGINE_ID, +}; +use sp_consensus_slots::Slot; use sp_core::crypto::Pair; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_TRACE, CONSENSUS_DEBUG}; -use 
sc_consensus_slots::{CheckedHeader, check_equivocation, InherentDataProviderExt}; -use sp_consensus_slots::Slot; -use sp_api::ApiExt; -use sp_consensus_aura::{ - digests::CompatibleDigestItem, AuraApi, inherents::AuraInherentData, - ConsensusLog, AURA_ENGINE_ID, +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestItemFor, Header}, + Justifications, }; +use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; /// check a header has been signed by the right key. If the slot is too far in the future, an error /// will be returned. If it's successful, returns the pre-header and the digest item @@ -61,7 +62,8 @@ fn check_header( hash: B::Hash, authorities: &[AuthorityId
<P>
], check_for_equivocation: CheckForEquivocation, -) -> Result)>, Error> where +) -> Result)>, Error> +where DigestItemFor: CompatibleDigestItem, P::Signature: Codec, C: sc_client_api::backend::AuxStore, @@ -69,9 +71,7 @@ fn check_header( { let seal = header.digest_mut().pop().ok_or_else(|| Error::HeaderUnsealed(hash))?; - let sig = seal.as_aura_seal().ok_or_else(|| { - aura_err(Error::HeaderBadSeal(hash)) - })?; + let sig = seal.as_aura_seal().ok_or_else(|| aura_err(Error::HeaderBadSeal(hash)))?; let slot = find_pre_digest::(&header)?; @@ -81,20 +81,17 @@ fn check_header( } else { // check the signature is valid under the expected authority and // chain state. - let expected_author = slot_author::
<P>(slot, &authorities) - .ok_or_else(|| Error::SlotAuthorNotFound)?; + let expected_author = + slot_author::<P>
(slot, &authorities).ok_or_else(|| Error::SlotAuthorNotFound)?; let pre_hash = header.hash(); if P::verify(&sig, pre_hash.as_ref(), expected_author) { if check_for_equivocation.check_for_equivocation() { - if let Some(equivocation_proof) = check_equivocation( - client, - slot_now, - slot, - &header, - expected_author, - ).map_err(Error::Client)? { + if let Some(equivocation_proof) = + check_equivocation(client, slot_now, slot, &header, expected_author) + .map_err(Error::Client)? + { info!( target: "aura", "Slot author is equivocating at slot {} with headers {:?} and {:?}", @@ -141,7 +138,8 @@ impl AuraVerifier { } } -impl AuraVerifier where +impl AuraVerifier +where P: Send + Sync + 'static, CAW: Send + Sync + 'static, CIDP: Send, @@ -152,8 +150,10 @@ impl AuraVerifier where block_id: BlockId, inherent_data: sp_inherents::InherentData, create_inherent_data_providers: CIDP::InherentDataProviders, - ) -> Result<(), Error> where - C: ProvideRuntimeApi, C::Api: BlockBuilderApi, + ) -> Result<(), Error> + where + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, CAW: CanAuthorWith, CIDP: CreateInherentDataProviders, { @@ -167,11 +167,11 @@ impl AuraVerifier where return Ok(()) } - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(|e| Error::Client(e.into()))?; + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { for (i, e) in inherent_res.into_errors() { @@ -187,13 +187,14 @@ impl AuraVerifier where } #[async_trait::async_trait] -impl Verifier for AuraVerifier where - C: ProvideRuntimeApi + - Send + - Sync + - sc_client_api::backend::AuxStore + - ProvideCache + - BlockOf, +impl Verifier for AuraVerifier +where + C: ProvideRuntimeApi + + Send + + Sync + + sc_client_api::backend::AuxStore + + ProvideCache + + BlockOf, C::Api: BlockBuilderApi + AuraApi> + ApiExt, DigestItemFor: CompatibleDigestItem, P: Pair + Send + Sync + 'static, @@ -215,15 +216,14 @@ impl Verifier for AuraVerifier w let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; - let create_inherent_data_providers = self.create_inherent_data_providers - .create_inherent_data_providers( - parent_hash, - (), - ) + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) .await .map_err(|e| Error::::Client(sp_blockchain::Error::Application(e)))?; - let mut inherent_data = create_inherent_data_providers.create_inherent_data() + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() .map_err(Error::::Inherent)?; let slot_now = create_inherent_data_providers.slot(); @@ -238,7 +238,8 @@ impl Verifier for AuraVerifier w hash, &authorities[..], self.check_for_equivocation, - ).map_err(|e| e.to_string())?; + ) + .map_err(|e| e.to_string())?; match checked_header { CheckedHeader::Checked(pre_header, (slot, seal)) => { // if the body is passed through, we need to use the runtime @@ -250,7 +251,8 @@ impl Verifier for AuraVerifier w inherent_data.aura_replace_inherent_data(slot); // skip the inherents verification if the runtime API is old. 
- if self.client + if self + .client .runtime_api() .has_api_with::, _>( &BlockId::Hash(parent_hash), @@ -263,7 +265,9 @@ impl Verifier for AuraVerifier w BlockId::Hash(parent_hash), inherent_data, create_inherent_data_providers, - ).await.map_err(|e| e.to_string())?; + ) + .await + .map_err(|e| e.to_string())?; } let (_, inner_body) = block.deconstruct(); @@ -279,16 +283,18 @@ impl Verifier for AuraVerifier w ); // Look for an authorities-change log. - let maybe_keys = pre_header.digest() + let maybe_keys = pre_header + .digest() .logs() .iter() - .filter_map(|l| l.try_to::>>( - OpaqueDigestItemId::Consensus(&AURA_ENGINE_ID) - )) + .filter_map(|l| { + l.try_to::>>(OpaqueDigestItemId::Consensus( + &AURA_ENGINE_ID, + )) + }) .find_map(|l| match l { - ConsensusLog::AuthoritiesChange(a) => Some( - vec![(well_known_cache_keys::AUTHORITIES, a.encode())] - ), + ConsensusLog::AuthoritiesChange(a) => + Some(vec![(well_known_cache_keys::AUTHORITIES, a.encode())]), _ => None, }); @@ -300,7 +306,7 @@ impl Verifier for AuraVerifier w import_block.post_hash = Some(hash); Ok((import_block, maybe_keys)) - } + }, CheckedHeader::Deferred(a, b) => { debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); telemetry!( @@ -312,7 +318,7 @@ impl Verifier for AuraVerifier w "b" => ?b, ); Err(format!("Header {:?} rejected: too far in the future", hash)) - } + }, } } } @@ -375,8 +381,9 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( can_author_with, check_for_equivocation, telemetry, - }: ImportQueueParams<'a, Block, I, C, S, CAW, CIDP> -) -> Result, sp_consensus::Error> where + }: ImportQueueParams<'a, Block, I, C, S, CAW, CIDP>, +) -> Result, sp_consensus::Error> +where Block: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, C: 'static @@ -388,7 +395,7 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( + AuxStore + UsageProvider + HeaderBackend, - I: BlockImport> + I: BlockImport> + Send + Sync + 'static, @@ -401,23 +408,15 @@ pub fn import_queue<'a, P, Block, I, C, S, CAW, CIDP>( CIDP: CreateInherentDataProviders + Sync + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { - let verifier = build_verifier::( - BuildVerifierParams { - client, - create_inherent_data_providers, - can_author_with, - check_for_equivocation, - telemetry, - }, - ); + let verifier = build_verifier::(BuildVerifierParams { + client, + create_inherent_data_providers, + can_author_with, + check_for_equivocation, + telemetry, + }); - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - spawner, - registry, - )) + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } /// Parameters of [`build_verifier`]. @@ -442,7 +441,7 @@ pub fn build_verifier( can_author_with, check_for_equivocation, telemetry, - }: BuildVerifierParams + }: BuildVerifierParams, ) -> AuraVerifier { AuraVerifier::<_, P, _, _>::new( client, diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 72545eda077ba19c340d925224ab141134534af5..341b0ed25cc46ef7db51d5c64db06ccac2717e2d 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -31,50 +31,53 @@ //! NOTE: Aura itself is designed to be generic over the crypto used. 
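
All of the Aura verifier code reformatted above turns on one invariant: with a fixed authority set, every slot has exactly one expected author, chosen round-robin, and `check_header` rejects any seal that does not verify against that author's key. The assignment itself, as a self-contained sketch of the `slot_author` round-robin that the lib.rs hunks below reflow:

fn expected_author<A>(slot: u64, authorities: &[A]) -> Option<&A> {
    if authorities.is_empty() {
        return None;
    }
    // Slot numbers simply wrap around the authority list.
    authorities.get((slot % authorities.len() as u64) as usize)
}

fn main() {
    let authorities = ["alice", "bob", "charlie"];
    assert_eq!(expected_author(7, &authorities), Some(&"bob"));
    assert_eq!(expected_author::<&str>(7, &[]), None);
}
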
#![forbid(missing_docs, unsafe_code)] use std::{ - sync::Arc, marker::PhantomData, hash::Hash, fmt::Debug, pin::Pin, convert::{TryFrom, TryInto}, + fmt::Debug, + hash::Hash, + marker::PhantomData, + pin::Pin, + sync::Arc, }; use futures::prelude::*; use log::{debug, trace}; -use codec::{Encode, Decode, Codec}; +use codec::{Codec, Decode, Encode}; -use sp_consensus::{ - BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, StateAction, -}; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; -use sp_blockchain::{Result as CResult, ProvideCache, HeaderBackend}; -use sp_core::crypto::Public; -use sp_application_crypto::{AppKey, AppPublic}; -use sp_runtime::{generic::BlockId, traits::NumberFor}; -use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; -use sp_api::ProvideRuntimeApi; -use sp_core::crypto::Pair; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_inherents::CreateInherentDataProviders; -use sc_telemetry::TelemetryHandle; use sc_consensus_slots::{ - SlotInfo, BackoffAuthoringBlocksStrategy, InherentDataProviderExt, StorageChanges, + BackoffAuthoringBlocksStrategy, InherentDataProviderExt, SlotInfo, StorageChanges, +}; +use sc_telemetry::TelemetryHandle; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::{AppKey, AppPublic}; +use sp_blockchain::{HeaderBackend, ProvideCache, Result as CResult}; +use sp_consensus::{ + BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, + Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, StateAction, }; use sp_consensus_slots::Slot; +use sp_core::crypto::{Pair, Public}; +use sp_inherents::CreateInherentDataProviders; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestItemFor, Header, Member, NumberFor, Zero}, +}; mod import_queue; -pub use sp_consensus_aura::{ - ConsensusLog, AuraApi, AURA_ENGINE_ID, digests::CompatibleDigestItem, - inherents::{ - InherentType as AuraInherent, - INHERENT_IDENTIFIER, InherentDataProvider, - }, -}; -pub use sp_consensus::SyncOracle; pub use import_queue::{ - ImportQueueParams, import_queue, CheckForEquivocation, - build_verifier, BuildVerifierParams, AuraVerifier, + build_verifier, import_queue, AuraVerifier, BuildVerifierParams, CheckForEquivocation, + ImportQueueParams, }; pub use sc_consensus_slots::SlotProportion; +pub use sp_consensus::SyncOracle; +pub use sp_consensus_aura::{ + digests::CompatibleDigestItem, + inherents::{InherentDataProvider, InherentType as AuraInherent, INHERENT_IDENTIFIER}, + AuraApi, ConsensusLog, AURA_ENGINE_ID, +}; type AuthorityId
<P> = <P as Pair>::Public; @@ -82,7 +85,8 @@ type AuthorityId<P> = <P as Pair>
::Public; pub type SlotDuration = sc_consensus_slots::SlotDuration; /// Get type of `SlotDuration` for Aura. -pub fn slot_duration(client: &C) -> CResult where +pub fn slot_duration(client: &C) -> CResult +where A: Codec, B: BlockT, C: AuxStore + ProvideRuntimeApi + UsageProvider, @@ -93,7 +97,9 @@ pub fn slot_duration(client: &C) -> CResult where /// Get slot author for given block along with authorities. fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>]) -> Option<&AuthorityId<P>
> { - if authorities.is_empty() { return None } + if authorities.is_empty() { + return None + } let idx = *slot % (authorities.len() as u64); assert!( @@ -101,9 +107,10 @@ fn slot_author(slot: Slot, authorities: &[AuthorityId
<P>
]) -> Option<&A "It is impossible to have a vector with length beyond the address space; qed", ); - let current_author = authorities.get(idx as usize) - .expect("authorities not empty; index constrained to list length;\ - this is a valid index; qed"); + let current_author = authorities.get(idx as usize).expect( + "authorities not empty; index constrained to list length;\ + this is a valid index; qed", + ); Some(current_author) } @@ -325,9 +332,8 @@ where type BlockImport = I; type SyncOracle = SO; type JustificationSyncLink = L; - type CreateProposer = Pin> + Send + 'static - >>; + type CreateProposer = + Pin> + Send + 'static>>; type Proposer = E::Proposer; type Claim = P::Public; type EpochData = Vec>; @@ -376,22 +382,25 @@ where slot: Slot, _claim: &Self::Claim, ) -> Vec> { - vec![ - as CompatibleDigestItem>::aura_pre_digest(slot), - ] + vec![ as CompatibleDigestItem>::aura_pre_digest(slot)] } - fn block_import_params(&self) -> Box, - StorageChanges, B>, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams>, - sp_consensus::Error> + Send + 'static> - { + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, B>, + Self::Claim, + Self::EpochData, + ) -> Result< + sp_consensus::BlockImportParams>, + sp_consensus::Error, + > + Send + + 'static, + > { let keystore = self.keystore.clone(); Box::new(move |header, header_hash, body, storage_changes, public, _epoch| { // sign the pre-sealed hash of the block and then @@ -402,28 +411,28 @@ where &*keystore, as AppKey>::ID, &public_type_pair, - header_hash.as_ref() - ).map_err(|e| sp_consensus::Error::CannotSign( - public.clone(), e.to_string(), - ))? - .ok_or_else(|| sp_consensus::Error::CannotSign( - public.clone(), "Could not find key in keystore.".into(), - ))?; - let signature = signature.clone().try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature( - signature, public - ))?; - - let signature_digest_item = < - DigestItemFor as CompatibleDigestItem - >::aura_seal(signature); + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + + let signature_digest_item = + as CompatibleDigestItem>::aura_seal(signature); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(signature_digest_item); import_block.body = Some(body); - import_block.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(storage_changes) - ); + import_block.state_action = + StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(storage_changes)); import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok(import_block) @@ -443,7 +452,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ); + ) } } false @@ -458,9 +467,11 @@ where } fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)).into() - })) + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)).into()), + ) } fn telemetry(&self) -> Option { @@ -515,7 +526,7 @@ impl std::convert::From> for String { fn find_pre_digest(header: &B::Header) -> Result> { if header.number().is_zero() { - return Ok(0.into()); + return Ok(0.into()) } let mut pre_digest: Option = None; @@ -530,13 +541,15 @@ fn find_pre_digest(header: &B::Header) -> Result(client: &C, at: &BlockId) -> Result, ConsensusError> where +fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> +where A: Codec + Debug, B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, { - client.runtime_api() + client + .runtime_api() .authorities(at) .ok() .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) @@ -545,26 +558,31 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus #[cfg(test)] mod tests { use super::*; - use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, - import_queue::BoxJustificationImport, SlotData, - }; - use sc_network_test::{Block as TestBlock, *}; - use sp_runtime::traits::{Block as BlockT, DigestFor}; - use sc_network::config::ProtocolConfig; use parking_lot::Mutex; - use sp_keyring::sr25519::Keyring; - use sc_client_api::BlockchainEvents; - use sp_consensus_aura::sr25519::AuthorityPair; - use sc_consensus_slots::{SimpleSlotWorker, BackoffAuthoringOnFinalizedHeadLagging}; - use std::{task::Poll, time::{Instant, Duration}}; use sc_block_builder::BlockBuilderProvider; - use sp_runtime::traits::Header as _; - use substrate_test_runtime_client::{TestClient, runtime::{Header, H256}}; + use sc_client_api::BlockchainEvents; + use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; + use sc_network::config::ProtocolConfig; + use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; + use sp_consensus::{ + import_queue::BoxJustificationImport, AlwaysCanAuthor, DisableProofRecording, + NoNetwork as DummyOracle, Proposal, SlotData, + }; + use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; + use sp_keyring::sr25519::Keyring; + use sp_runtime::traits::{Block as BlockT, DigestFor, Header as _}; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; + use std::{ + task::Poll, 
+ time::{Duration, Instant}, + }; + use substrate_test_runtime_client::{ + runtime::{Header, H256}, + TestClient, + }; type Error = sp_blockchain::Error; @@ -576,19 +594,15 @@ mod tests { type CreateProposer = futures::future::Ready>; type Error = Error; - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) } } impl Proposer for DummyProposer { type Error = Error; - type Transaction = sc_client_api::TransactionFor< - substrate_test_runtime_client::Backend, - TestBlock - >; + type Transaction = + sc_client_api::TransactionFor; type Proposal = future::Ready, Error>>; type ProofRecording = DisableProofRecording; type Proof = (); @@ -616,11 +630,13 @@ mod tests { PeersFullClient, AuthorityPair, AlwaysCanAuthor, - Box> + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + >, + >, >; type AuraPeer = Peer<(), PeersClient>; @@ -635,14 +651,15 @@ mod tests { /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { - peers: Vec::new(), - } + AuraTestNet { peers: Vec::new() } } - fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { match client { PeersClient::Full(client, _) => { let slot_duration = slot_duration(&*client).expect("slot duration available"); @@ -668,7 +685,10 @@ mod tests { } } - fn make_block_import(&self, client: PeersClient) -> ( + fn make_block_import( + &self, + client: PeersClient, + ) -> ( BlockImportAdapter, Option>, Self::PeerData, @@ -693,11 +713,7 @@ mod tests { sp_tracing::try_init_simple(); let net = AuraTestNet::new(3); - let peers = &[ - (0, Keyring::Alice), - (1, Keyring::Bob), - (2, Keyring::Charlie), - ]; + let peers = &[(0, Keyring::Alice), (1, Keyring::Bob), (2, Keyring::Charlie)]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); @@ -710,9 +726,9 @@ mod tests { let client = peer.client().as_full().expect("full clients are created").clone(); let select_chain = peer.select_chain().expect("full client has a select chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore.")); - + let keystore = Arc::new( + LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."), + ); SyncCryptoStore::sr25519_generate_new(&*keystore, AURA, Some(&key.to_seed())) .expect("Creates authority key"); @@ -720,38 +736,46 @@ mod tests { let environ = DummyFactory(client.clone()); import_notifications.push( - client.import_notification_stream() - .take_while(|n| future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) - .for_each(move |_| future::ready(())) + client + .import_notification_stream() + .take_while(|n| { + future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5)) + }) + .for_each(move |_| future::ready(())), ); let slot_duration = slot_duration(&*client).expect("slot duration available"); - aura_futures.push(start_aura::(StartAuraParams { - slot_duration, - block_import: client.clone(), - select_chain, - client, - proposer_factory: environ, - sync_oracle: DummyOracle, - 
justification_sync_link: (), - create_inherent_data_providers: |_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); - let slot = InherentDataProvider::from_timestamp_and_duration( - *timestamp, - Duration::from_secs(6), - ); - - Ok((timestamp, slot)) - }, - force_authoring: false, - backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), - keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, - block_proposal_slot_portion: SlotProportion::new(0.5), - max_block_proposal_slot_portion: None, - telemetry: None, - }).expect("Starts aura")); + aura_futures.push( + start_aura::(StartAuraParams { + slot_duration, + block_import: client.clone(), + select_chain, + client, + proposer_factory: environ, + sync_oracle: DummyOracle, + justification_sync_link: (), + create_inherent_data_providers: |_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); + + Ok((timestamp, slot)) + }, + force_authoring: false, + backoff_authoring_blocks: Some( + BackoffAuthoringOnFinalizedHeadLagging::default(), + ), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + telemetry: None, + }) + .expect("Starts aura"), + ); } futures::executor::block_on(future::select( @@ -759,10 +783,7 @@ mod tests { net.lock().poll(cx); Poll::<()>::Pending }), - future::select( - future::join_all(aura_futures), - future::join_all(import_notifications) - ) + future::select(future::join_all(aura_futures), future::join_all(import_notifications)), )); } @@ -771,11 +792,14 @@ mod tests { let client = substrate_test_runtime_client::new(); assert_eq!(client.chain_info().best_number, 0); - assert_eq!(authorities(&client, &BlockId::Number(0)).unwrap(), vec![ - Keyring::Alice.public().into(), - Keyring::Bob.public().into(), - Keyring::Charlie.public().into() - ]); + assert_eq!( + authorities(&client, &BlockId::Number(0)).unwrap(), + vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into() + ] + ); } #[test] @@ -785,12 +809,11 @@ mod tests { let mut authorities = vec![ Keyring::Alice.public().into(), Keyring::Bob.public().into(), - Keyring::Charlie.public().into() + Keyring::Charlie.public().into(), ]; let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore."); + let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); let public = SyncCryptoStore::sr25519_generate_new(&keystore, AuthorityPair::ID, None) .expect("Key should be created"); authorities.push(public.into()); @@ -822,7 +845,7 @@ mod tests { H256::from_low_u64_be(0), H256::from_low_u64_be(0), Default::default(), - Default::default() + Default::default(), ); assert!(worker.claim_slot(&head, 0.into(), &authorities).is_none()); assert!(worker.claim_slot(&head, 1.into(), &authorities).is_none()); @@ -839,12 +862,13 @@ mod tests { let net = AuraTestNet::new(4); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore."); + let keystore = LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore."); SyncCryptoStore::sr25519_generate_new( &keystore, - AuthorityPair::ID, 
Some(&Keyring::Alice.to_seed()), - ).expect("Key should be created"); + AuthorityPair::ID, + Some(&Keyring::Alice.to_seed()), + ) + .expect("Key should be created"); let net = Arc::new(Mutex::new(net)); @@ -870,17 +894,16 @@ mod tests { let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); - let res = futures::executor::block_on(worker.on_slot( - SlotInfo { - slot: 0.into(), - timestamp: 0.into(), - ends_at: Instant::now() + Duration::from_secs(100), - inherent_data: InherentData::new(), - duration: Duration::from_millis(1000), - chain_head: head, - block_size_limit: None, - } - )).unwrap(); + let res = futures::executor::block_on(worker.on_slot(SlotInfo { + slot: 0.into(), + timestamp: 0.into(), + ends_at: Instant::now() + Duration::from_secs(100), + inherent_data: InherentData::new(), + duration: Duration::from_millis(1000), + chain_head: head, + block_size_limit: None, + })) + .unwrap(); // The returned block should be imported and we should be able to get its header by now. assert!(client.header(&BlockId::Hash(res.block.hash())).unwrap().is_some()); diff --git a/substrate/client/consensus/babe/rpc/src/lib.rs b/substrate/client/consensus/babe/rpc/src/lib.rs index e16c24acaca3648f89854aac94e9a936d7d9ed10..e85a4306553711a91753f6dd5eec101be9a05b64 100644 --- a/substrate/client/consensus/babe/rpc/src/lib.rs +++ b/substrate/client/consensus/babe/rpc/src/lib.rs @@ -18,30 +18,21 @@ //! RPC api for babe. -use sc_consensus_babe::{Epoch, authorship, Config}; use futures::{FutureExt as _, TryFutureExt as _}; -use jsonrpc_core::{ - Error as RpcError, - futures::future as rpc_future, -}; +use jsonrpc_core::{futures::future as rpc_future, Error as RpcError}; use jsonrpc_derive::rpc; +use sc_consensus_babe::{authorship, Config, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; -use sp_consensus_babe::{ - AuthorityId, - BabeApi as BabeRuntimeApi, - digests::PreDigest, -}; +use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; -use sp_core::{ - crypto::Public, -}; +use sp_api::{BlockId, ProvideRuntimeApi}; use sp_application_crypto::AppKey; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sc_rpc_api::DenyUnsafe; -use sp_api::{ProvideRuntimeApi, BlockId}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{Error as ConsensusError, SelectChain}; +use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; +use sp_core::crypto::Public; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_consensus::{SelectChain, Error as ConsensusError}; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; use std::{collections::HashMap, sync::Arc}; type FutureResult = Box + Send>; @@ -81,14 +72,7 @@ impl BabeRpcHandler { select_chain: SC, deny_unsafe: DenyUnsafe, ) -> Self { - Self { - client, - shared_epoch_changes, - keystore, - babe_config, - select_chain, - deny_unsafe, - } + Self { client, shared_epoch_changes, keystore, babe_config, select_chain, deny_unsafe } } } @@ -104,16 +88,10 @@ where { fn epoch_authorship(&self) -> FutureResult> { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(rpc_future::err(err.into())); + return Box::new(rpc_future::err(err.into())) } - let ( - babe_config, - keystore, - shared_epoch, - client, - select_chain, - ) = ( + let (babe_config, keystore, shared_epoch, client, select_chain) = ( self.babe_config.clone(), 
self.keystore.clone(), self.shared_epoch_changes.clone(), @@ -126,14 +104,9 @@ where .runtime_api() .current_epoch_start(&BlockId::Hash(header.hash())) .map_err(|err| Error::StringError(format!("{:?}", err)))?; - let epoch = epoch_data( - &shared_epoch, - &client, - &babe_config, - *epoch_start, - &select_chain, - ) - .await?; + let epoch = + epoch_data(&shared_epoch, &client, &babe_config, *epoch_start, &select_chain) + .await?; let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); let mut claims: HashMap = HashMap::new(); @@ -163,10 +136,10 @@ where match claim { PreDigest::Primary { .. } => { claims.entry(key).or_default().primary.push(slot); - } + }, PreDigest::SecondaryPlain { .. } => { claims.entry(key).or_default().secondary.push(slot); - } + }, PreDigest::SecondaryVRF { .. } => { claims.entry(key).or_default().secondary_vrf.push(slot.into()); }, @@ -199,7 +172,7 @@ pub enum Error { /// Consensus error Consensus(ConsensusError), /// Errors that can be formatted as a String - StringError(String) + StringError(String), } impl From for jsonrpc_core::Error { @@ -226,13 +199,15 @@ where SC: SelectChain, { let parent = select_chain.best_chain().await?; - epoch_changes.shared_data().epoch_data_for_child_of( - descendent_query(&**client), - &parent.hash(), - parent.number().clone(), - slot.into(), - |slot| Epoch::genesis(&babe_config, slot), - ) + epoch_changes + .shared_data() + .epoch_data_for_child_of( + descendent_query(&**client), + &parent.hash(), + parent.number().clone(), + slot.into(), + |slot| Epoch::genesis(&babe_config, slot), + ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) } @@ -240,31 +215,27 @@ where #[cfg(test)] mod tests { use super::*; + use sc_keystore::LocalKeystore; + use sp_application_crypto::AppPair; + use sp_core::crypto::key_types::BABE; + use sp_keyring::Sr25519Keyring; + use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use substrate_test_runtime_client::{ - runtime::Block, - Backend, - DefaultTestClientBuilderExt, - TestClient, + runtime::Block, Backend, DefaultTestClientBuilderExt, TestClient, TestClientBuilder, TestClientBuilderExt, - TestClientBuilder, }; - use sp_application_crypto::AppPair; - use sp_keyring::Sr25519Keyring; - use sp_core::{crypto::key_types::BABE}; - use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; - use sc_keystore::LocalKeystore; - use std::sync::Arc; - use sc_consensus_babe::{Config, block_import, AuthorityPair}; use jsonrpc_core::IoHandler; + use sc_consensus_babe::{block_import, AuthorityPair, Config}; + use std::sync::Arc; /// creates keystore backed by a temp file fn create_temp_keystore( authority: Sr25519Keyring, ) -> (SyncCryptoStorePtr, tempfile::TempDir) { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(&authority.to_seed())) .expect("Creates authority key"); @@ -272,17 +243,14 @@ mod tests { } fn test_babe_rpc_handler( - deny_unsafe: DenyUnsafe + deny_unsafe: DenyUnsafe, ) -> BabeRpcHandler> { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); let config = Config::get_or_compute(&*client).expect("config available"); - let 
(_, link) = block_import( - config.clone(), - client.clone(), - client.clone(), - ).expect("can initialize block-import"); + let (_, link) = block_import(config.clone(), client.clone(), client.clone()) + .expect("can initialize block-import"); let epoch_changes = link.epoch_changes().clone(); let keystore = create_temp_keystore::(Sr25519Keyring::Alice).0; diff --git a/substrate/client/consensus/babe/src/authorship.rs b/substrate/client/consensus/babe/src/authorship.rs index 2a90ca3b94c006c5499c088c22cfc12411a404a0..609f96c83c194ebd738c6fbaaa83b79d4e7ed5e9 100644 --- a/substrate/client/consensus/babe/src/authorship.rs +++ b/substrate/client/consensus/babe/src/authorship.rs @@ -18,23 +18,17 @@ //! BABE authority selection and slot claiming. +use super::Epoch; +use codec::Encode; +use schnorrkel::{keys::PublicKey, vrf::VRFInOut}; use sp_application_crypto::AppKey; use sp_consensus_babe::{ - BABE_VRF_PREFIX, AuthorityId, BabeAuthorityWeight, make_transcript, make_transcript_data, - Slot, -}; -use sp_consensus_babe::digests::{ - PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, + digests::{PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest}, + make_transcript, make_transcript_data, AuthorityId, BabeAuthorityWeight, Slot, BABE_VRF_PREFIX, }; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::{U256, blake2_256, crypto::Public}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use codec::Encode; -use schnorrkel::{ - keys::PublicKey, - vrf::VRFInOut, -}; -use super::Epoch; +use sp_core::{blake2_256, crypto::Public, U256}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -49,8 +43,7 @@ pub(super) fn calculate_primary_threshold( let c = c.0 as f64 / c.1 as f64; - let theta = - authorities[authority_index].1 as f64 / + let theta = authorities[authority_index].1 as f64 / authorities.iter().map(|(_, weight)| weight).sum::() as f64; assert!(theta > 0.0, "authority with weight 0."); @@ -74,14 +67,14 @@ pub(super) fn calculate_primary_threshold( "returns None when the given value is negative; \ p is defined as `1 - n` where n is defined in (0, 1]; \ p must be a value in [0, 1); \ - qed." + qed.", ); let denom = p.denom().to_biguint().expect( "returns None when the given value is negative; \ p is defined as `1 - n` where n is defined in (0, 1]; \ p must be a value in [0, 1); \ - qed." + qed.", ); ((BigUint::one() << 128) * numer / denom).to_u128().expect( @@ -108,7 +101,7 @@ pub(super) fn secondary_slot_author( randomness: [u8; 32], ) -> Option<&AuthorityId> { if authorities.is_empty() { - return None; + return None } let rand = U256::from((randomness, slot).using_encoded(blake2_256)); @@ -116,9 +109,10 @@ pub(super) fn secondary_slot_author( let authorities_len = U256::from(authorities.len()); let idx = rand % authorities_len; - let expected_author = authorities.get(idx.as_u32() as usize) - .expect("authorities not empty; index constrained to list length; \ - this is a valid index; qed"); + let expected_author = authorities.get(idx.as_u32() as usize).expect( + "authorities not empty; index constrained to list length; \ + this is a valid index; qed", + ); Some(&expected_author.0) } @@ -136,23 +130,15 @@ fn claim_secondary_slot( let Epoch { authorities, randomness, epoch_index, .. 
} = epoch; if authorities.is_empty() { - return None; + return None } - let expected_author = secondary_slot_author( - slot, - authorities, - *randomness, - )?; + let expected_author = secondary_slot_author(slot, authorities, *randomness)?; for (authority_id, authority_index) in keys { if authority_id == expected_author { let pre_digest = if author_secondary_vrf { - let transcript_data = make_transcript_data( - randomness, - slot, - *epoch_index, - ); + let transcript_data = make_transcript_data(randomness, slot, *epoch_index); let result = SyncCryptoStore::sr25519_vrf_sign( &**keystore, AuthorityId::ID, @@ -169,7 +155,10 @@ fn claim_secondary_slot( } else { None } - } else if SyncCryptoStore::has_keys(&**keystore, &[(authority_id.to_raw_vec(), AuthorityId::ID)]) { + } else if SyncCryptoStore::has_keys( + &**keystore, + &[(authority_id.to_raw_vec(), AuthorityId::ID)], + ) { Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: *authority_index as u32, @@ -179,7 +168,7 @@ fn claim_secondary_slot( }; if let Some(pre_digest) = pre_digest { - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } @@ -196,7 +185,9 @@ pub fn claim_slot( epoch: &Epoch, keystore: &SyncCryptoStorePtr, ) -> Option<(PreDigest, AuthorityId)> { - let authorities = epoch.authorities.iter() + let authorities = epoch + .authorities + .iter() .enumerate() .map(|(index, a)| (a.0.clone(), index)) .collect::>(); @@ -211,22 +202,21 @@ pub fn claim_slot_using_keys( keystore: &SyncCryptoStorePtr, keys: &[(AuthorityId, usize)], ) -> Option<(PreDigest, AuthorityId)> { - claim_primary_slot(slot, epoch, epoch.config.c, keystore, &keys) - .or_else(|| { - if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || - epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() - { - claim_secondary_slot( - slot, - &epoch, - keys, - &keystore, - epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), - ) - } else { - None - } - }) + claim_primary_slot(slot, epoch, epoch.config.c, keystore, &keys).or_else(|| { + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() + { + claim_secondary_slot( + slot, + &epoch, + keys, + &keystore, + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), + ) + } else { + None + } + }) } /// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. @@ -243,16 +233,8 @@ fn claim_primary_slot( let Epoch { authorities, randomness, epoch_index, .. } = epoch; for (authority_id, authority_index) in keys { - let transcript = make_transcript( - randomness, - slot, - *epoch_index - ); - let transcript_data = make_transcript_data( - randomness, - slot, - *epoch_index - ); + let transcript = make_transcript(randomness, slot, *epoch_index); + let transcript_data = make_transcript_data(randomness, slot, *epoch_index); // Compute the threshold we will use. 
// // We already checked that authorities contains `key.public()`, so it can't @@ -279,7 +261,7 @@ fn claim_primary_slot( authority_index: *authority_index as u32, }); - return Some((pre_digest, authority_id.clone())); + return Some((pre_digest, authority_id.clone())) } } } @@ -290,10 +272,10 @@ fn claim_primary_slot( #[cfg(test)] mod tests { use super::*; - use std::sync::Arc; - use sp_core::{sr25519::Pair, crypto::Pair as _}; - use sp_consensus_babe::{AuthorityId, BabeEpochConfiguration, AllowedSlots}; use sc_keystore::LocalKeystore; + use sp_consensus_babe::{AllowedSlots, AuthorityId, BabeEpochConfiguration}; + use sp_core::{crypto::Pair as _, sr25519::Pair}; + use std::sync::Arc; #[test] fn claim_secondary_plain_slot_works() { @@ -302,7 +284,8 @@ mod tests { &*keystore, AuthorityId::ID, Some(sp_core::crypto::DEV_PHRASE), - ).unwrap(); + ) + .unwrap(); let authorities = vec![ (AuthorityId::from(Pair::generate().0.public()), 5), diff --git a/substrate/client/consensus/babe/src/aux_schema.rs b/substrate/client/consensus/babe/src/aux_schema.rs index 69c1a1930bbb5992360555bbe3fd2f8ca56c976f..4be7dff3eedcd189ec4c9a0362e006d6f5da0279 100644 --- a/substrate/client/consensus/babe/src/aux_schema.rs +++ b/substrate/client/consensus/babe/src/aux_schema.rs @@ -18,15 +18,15 @@ //! Schema for BABE epoch changes in the aux-db. -use log::info; use codec::{Decode, Encode}; +use log::info; +use crate::{migration::EpochV0, Epoch}; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sp_runtime::traits::Block as BlockT; +use sc_consensus_epochs::{migration::EpochChangesForV0, EpochChangesFor, SharedEpochChanges}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; -use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges, migration::EpochChangesForV0}; -use crate::{Epoch, migration::EpochV0}; +use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; @@ -38,16 +38,16 @@ pub fn block_weight_key(block_hash: H) -> Vec { } fn load_decode(backend: &B, key: &[u8]) -> ClientResult> - where - B: AuxStore, - T: Decode, +where + B: AuxStore, + T: Decode, { let corrupt = |e: codec::Error| { ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e)) }; match backend.get_aux(key)? { None => Ok(None), - Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt) + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), } } @@ -59,32 +59,26 @@ pub fn load_epoch_changes( let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; let maybe_epoch_changes = match version { - None => load_decode::<_, EpochChangesForV0>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?.map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), - Some(1) => load_decode::<_, EpochChangesFor>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?.map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), - Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?, - Some(other) => { - return Err(ClientError::Backend( - format!("Unsupported BABE DB version: {:?}", other) - )) - }, + None => + load_decode::<_, EpochChangesForV0>(backend, BABE_EPOCH_CHANGES_KEY)? 
+ .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), + Some(1) => + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? + .map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)?, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported BABE DB version: {:?}", other))), }; - let epoch_changes = SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { - info!( - target: "babe", - "👶 Creating empty BABE epoch changes on what appears to be first startup.", - ); - EpochChangesFor::::default() - })); + let epoch_changes = + SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { + info!( + target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup.", + ); + EpochChangesFor::::default() + })); // rebalance the tree after deserialization. this isn't strictly necessary // since the tree is now rebalanced on every update operation. but since the @@ -99,15 +93,16 @@ pub fn load_epoch_changes( pub(crate) fn write_epoch_changes( epoch_changes: &EpochChangesFor, write_aux: F, -) -> R where +) -> R +where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { let encoded_epoch_changes = epoch_changes.encode(); - write_aux( - &[(BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), - (BABE_EPOCH_CHANGES_VERSION, version)], - ) + write_aux(&[ + (BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), + (BABE_EPOCH_CHANGES_VERSION, version), + ]) }) } @@ -116,15 +111,12 @@ pub(crate) fn write_block_weight( block_hash: H, block_weight: BabeBlockWeight, write_aux: F, -) -> R where +) -> R +where F: FnOnce(&[(Vec, &[u8])]) -> R, { let key = block_weight_key(block_hash); - block_weight.using_encoded(|s| - write_aux( - &[(key, s)], - ) - ) + block_weight.using_encoded(|s| write_aux(&[(key, s)])) } /// Load the cumulative chain-weight associated with a block. @@ -140,13 +132,13 @@ mod test { use super::*; use crate::migration::EpochV0; use fork_tree::ForkTree; - use substrate_test_runtime_client; + use sc_consensus_epochs::{EpochHeader, PersistedEpoch, PersistedEpochHeader}; + use sc_network_test::Block as TestBlock; + use sp_consensus::Error as ConsensusError; + use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; use sp_core::H256; use sp_runtime::traits::NumberFor; - use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; - use sc_consensus_epochs::{PersistedEpoch, PersistedEpochHeader, EpochHeader}; - use sp_consensus::Error as ConsensusError; - use sc_network_test::Block as TestBlock; + use substrate_test_runtime_client; #[test] fn load_decode_from_v0_epoch_changes() { @@ -159,26 +151,30 @@ mod test { }; let client = substrate_test_runtime_client::new(); let mut v0_tree = ForkTree::, _>::new(); - v0_tree.import::<_, ConsensusError>( - Default::default(), - Default::default(), - PersistedEpoch::Regular(epoch), - &|_, _| Ok(false), // Test is single item only so this can be set to false. - ).unwrap(); - - client.insert_aux( - &[(BABE_EPOCH_CHANGES_KEY, - &EpochChangesForV0::::from_raw(v0_tree).encode()[..])], - &[], - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - None, - ); + v0_tree + .import::<_, ConsensusError>( + Default::default(), + Default::default(), + PersistedEpoch::Regular(epoch), + &|_, _| Ok(false), // Test is single item only so this can be set to false. 
+ ) + .unwrap(); + + client + .insert_aux( + &[( + BABE_EPOCH_CHANGES_KEY, + &EpochChangesForV0::::from_raw(v0_tree).encode()[..], + )], + &[], + ) + .unwrap(); + + assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), None,); let epoch_changes = load_epoch_changes::( - &client, &BabeGenesisConfiguration { + &client, + &BabeGenesisConfiguration { slot_duration: 10, epoch_length: 4, c: (3, 10), @@ -186,10 +182,12 @@ mod test { randomness: Default::default(), allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, }, - ).unwrap(); + ) + .unwrap(); assert!( - epoch_changes.shared_data() + epoch_changes + .shared_data() .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) @@ -200,16 +198,10 @@ mod test { })], ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. - write_epoch_changes::( - &epoch_changes.shared_data(), - |values| { - client.insert_aux(values, &[]).unwrap(); - }, - ); + write_epoch_changes::(&epoch_changes.shared_data(), |values| { + client.insert_aux(values, &[]).unwrap(); + }); - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - Some(2), - ); + assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), Some(2),); } } diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 61b58bf1b5999492715d0c3872794acae9d69808..315bd4e9921aa31c793c2bc06a9e1eba5a9a146b 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -71,9 +71,13 @@ use std::{ }; use codec::{Decode, Encode}; -use futures::channel::mpsc::{channel, Receiver, Sender}; -use futures::channel::oneshot; -use futures::prelude::*; +use futures::{ + channel::{ + mpsc::{channel, Receiver, Sender}, + oneshot, + }, + prelude::*, +}; use log::{debug, info, log, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::Registry; @@ -89,18 +93,16 @@ use sc_consensus_slots::{ SlotInfo, StorageChanges, }; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; -use sp_api::ApiExt; -use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{ Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, }; -use sp_consensus::{import_queue::BoxJustificationImport, CanAuthorWith, ImportResult}; use sp_consensus::{ - import_queue::{BasicQueue, CacheKeyId, DefaultImportQueue, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment, - Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData, + import_queue::{BasicQueue, BoxJustificationImport, CacheKeyId, DefaultImportQueue, Verifier}, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, + Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, SelectChain, SlotData, StateAction, }; use sp_consensus_babe::inherents::BabeInherentData; @@ -159,7 +161,7 @@ impl EpochT for Epoch { fn increment( &self, - (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration) + (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration), ) -> Epoch { Epoch { epoch_index: self.epoch_index + 1, @@ -183,10 +185,7 @@ impl EpochT for Epoch { impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. 
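
The `load_epoch_changes` hunk above is pure reindentation, but the version handling it preserves is worth spelling out: an absent version key means the pre-versioning (v0) layout, version 1 is decoded and migrated forward, the current version (2) is decoded as-is, and anything newer is a hard error. The same dispatch in miniature, with placeholder byte payloads and hypothetical migration helpers:

const CURRENT_VERSION: u32 = 2;

#[derive(Debug)]
enum SchemaError {
    UnsupportedVersion(u32),
}

fn load(version: Option<u32>, raw: &[u8]) -> Result<Vec<u8>, SchemaError> {
    match version {
        // No version key was ever written: pre-versioning layout.
        None => Ok(migrate_v0(raw)),
        // One migration step behind the current encoding.
        Some(1) => Ok(migrate_v1(raw)),
        // Already current: decode as-is.
        Some(CURRENT_VERSION) => Ok(raw.to_vec()),
        // Newer than this binary understands: refuse.
        Some(other) => Err(SchemaError::UnsupportedVersion(other)),
    }
}

// Stand-ins for the real format migrations.
fn migrate_v0(raw: &[u8]) -> Vec<u8> { raw.to_vec() }
fn migrate_v1(raw: &[u8]) -> Vec<u8> { raw.to_vec() }

fn main() {
    assert_eq!(load(Some(2), b"x").unwrap(), b"x".to_vec());
    assert!(matches!(load(Some(3), b"x"), Err(SchemaError::UnsupportedVersion(3))));
}
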
- pub fn genesis( - genesis_config: &BabeGenesisConfiguration, - slot: Slot, - ) -> Epoch { + pub fn genesis(genesis_config: &BabeGenesisConfiguration, slot: Slot) -> Epoch { Epoch { epoch_index: 0, start_slot: slot, @@ -253,7 +252,11 @@ pub enum Error { #[display(fmt = "No secondary author expected.")] NoSecondaryAuthorExpected, /// VRF verification of block by author failed - #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] + #[display( + fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", + _0, + _1 + )] VRFVerificationOfBlockFailed(AuthorityId, u128), /// VRF verification failed #[display(fmt = "VRF verification failed: {:?}", _0)] @@ -320,35 +323,36 @@ pub struct Config(sc_consensus_slots::SlotDuration); impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. - pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi + UsageProvider, C::Api: BabeApi, + pub fn get_or_compute(client: &C) -> ClientResult + where + C: AuxStore + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, { trace!(target: "babe", "Getting slot duration"); match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| { - let has_api_v1 = a.has_api_with::, _>( - &b, |v| v == 1, - )?; - let has_api_v2 = a.has_api_with::, _>( - &b, |v| v == 2, - )?; + let has_api_v1 = a.has_api_with::, _>(&b, |v| v == 1)?; + let has_api_v2 = a.has_api_with::, _>(&b, |v| v == 2)?; if has_api_v1 { - #[allow(deprecated)] { + #[allow(deprecated)] + { Ok(a.configuration_before_version_2(b)?.into()) } } else if has_api_v2 { a.configuration(b).map_err(Into::into) } else { Err(sp_blockchain::Error::VersionInvalid( - "Unsupported or invalid BabeApi version".to_string() + "Unsupported or invalid BabeApi version".to_string(), )) } - }).map(Self) { + }) + .map(Self) + { Ok(s) => Ok(s), Err(s) => { warn!(target: "babe", "Failed to get slot duration"); Err(s) - } + }, } } @@ -502,7 +506,8 @@ where let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); - let answer_requests = answer_requests(worker_rx, config.0, client, babe_link.epoch_changes.clone()); + let answer_requests = + answer_requests(worker_rx, config.0, client, babe_link.epoch_changes.clone()); Ok(BabeWorker { inner: Box::pin(future::join(inner, answer_requests).map(|_| ())), slot_notification_sinks, @@ -515,28 +520,37 @@ async fn answer_requests( genesis_config: sc_consensus_slots::SlotDuration, client: Arc, epoch_changes: SharedEpochChanges, -) - where C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents - + HeaderBackend + HeaderMetadata + Send + Sync + 'static, +) where + C: ProvideRuntimeApi + + ProvideCache + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, { while let Some(request) = request_rx.next().await { match request { BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => { let lookup = || { let epoch_changes = epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( - descendent_query(&*client), - &parent_hash, - parent_number, - slot_number, - ) + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*client), + &parent_hash, + parent_number, + slot_number, + ) .map_err(|e| Error::::ForkTree(Box::new(e)))? 
.ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&genesis_config, slot) - ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| { + Epoch::genesis(&genesis_config, slot) + }) + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; Ok(sp_consensus_babe::Epoch { epoch_index: viable_epoch.as_ref().epoch_index, @@ -549,7 +563,7 @@ async fn answer_requests( }; let _ = response.send(lookup()); - } + }, } } } @@ -584,7 +598,7 @@ impl BabeWorkerHandle { /// Worker for Babe which implements `Future`. This must be polled. #[must_use] pub struct BabeWorker { - inner: Pin + Send + 'static>>, + inner: Pin + Send + 'static>>, slot_notification_sinks: SlotNotificationSinks, handle: BabeWorkerHandle, } @@ -593,7 +607,7 @@ impl BabeWorker { /// Return an event stream of notifications for when new slot happens, and the corresponding /// epoch descriptor. pub fn slot_notification_stream( - &self + &self, ) -> Receiver<(Slot, ViableEpochDescriptor, Epoch>)> { const CHANNEL_BUFFER_SIZE: usize = 1024; @@ -613,7 +627,7 @@ impl futures::Future for BabeWorker { fn poll( mut self: Pin<&mut Self>, - cx: &mut futures::task::Context + cx: &mut futures::task::Context, ) -> futures::task::Poll { self.inner.as_mut().poll(cx) } @@ -621,7 +635,7 @@ impl futures::Future for BabeWorker { /// Slot notification sinks. type SlotNotificationSinks = Arc< - Mutex::Hash, NumberFor, Epoch>)>>> + Mutex::Hash, NumberFor, Epoch>)>>>, >; struct BabeSlotWorker { @@ -662,9 +676,8 @@ where type Claim = (PreDigest, AuthorityId); type SyncOracle = SO; type JustificationSyncLink = L; - type CreateProposer = Pin> + Send + 'static - >>; + type CreateProposer = + Pin> + Send + 'static>>; type Proposer = E::Proposer; type BlockImport = I; @@ -681,12 +694,14 @@ where parent: &B::Header, slot: Slot, ) -> Result { - self.epoch_changes.shared_data().epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - parent.number().clone(), - slot, - ) + self.epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot, + ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } @@ -707,10 +722,10 @@ where debug!(target: "babe", "Attempting to claim slot {}", slot); let s = authorship::claim_slot( slot, - self.epoch_changes.shared_data().viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - )?.as_ref(), + self.epoch_changes + .shared_data() + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot))? 
+ .as_ref(), &self.keystore, ); @@ -727,20 +742,18 @@ where slot: Slot, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) { - self.slot_notification_sinks.lock() - .retain_mut(|sink| { - match sink.try_send((slot, epoch_descriptor.clone())) { - Ok(()) => true, - Err(e) => { - if e.is_full() { - warn!(target: "babe", "Trying to notify a slot but the channel is full"); - true - } else { - false - } + self.slot_notification_sinks.lock().retain_mut(|sink| { + match sink.try_send((slot, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false }, - } - }); + } + }); } fn pre_digest_data( @@ -748,59 +761,64 @@ where _slot: Slot, claim: &Self::Claim, ) -> Vec> { - vec![ - as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), - ] + vec![ as CompatibleDigestItem>::babe_pre_digest(claim.0.clone())] } - fn block_import_params(&self) -> Box, - StorageChanges, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams, - sp_consensus::Error> + Send + 'static> - { + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, + Self::Claim, + Self::EpochData, + ) -> Result, sp_consensus::Error> + + Send + + 'static, + > { let keystore = self.keystore.clone(); - Box::new(move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| { - // sign the pre-sealed hash of the block and then - // add it to a digest item. - let public_type_pair = public.clone().into(); - let public = public.to_raw_vec(); - let signature = SyncCryptoStore::sign_with( - &*keystore, - ::ID, - &public_type_pair, - header_hash.as_ref() - ) - .map_err(|e| sp_consensus::Error::CannotSign( - public.clone(), e.to_string(), - ))? - .ok_or_else(|| sp_consensus::Error::CannotSign( - public.clone(), "Could not find key in keystore.".into(), - ))?; - let signature: AuthoritySignature = signature.clone().try_into() - .map_err(|_| sp_consensus::Error::InvalidSignature( - signature, public - ))?; - let digest_item = as CompatibleDigestItem>::babe_seal(signature.into()); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(storage_changes) - ); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, - ); + Box::new( + move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = SyncCryptoStore::sign_with( + &*keystore, + ::ID, + &public_type_pair, + header_hash.as_ref(), + ) + .map_err(|e| sp_consensus::Error::CannotSign(public.clone(), e.to_string()))? 
+ .ok_or_else(|| { + sp_consensus::Error::CannotSign( + public.clone(), + "Could not find key in keystore.".into(), + ) + })?; + let signature: AuthoritySignature = signature + .clone() + .try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; + let digest_item = + as CompatibleDigestItem>::babe_seal(signature.into()); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.state_action = StateAction::ApplyChanges( + sp_consensus::StorageChanges::Changes(storage_changes), + ); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, + ); - Ok(import_block) - }) + Ok(import_block) + }, + ) } fn force_authoring(&self) -> bool { @@ -809,8 +827,8 @@ where fn should_backoff(&self, slot: Slot, chain_head: &B::Header) -> bool { if let Some(ref strategy) = self.backoff_authoring_blocks { - if let Ok(chain_head_slot) = find_pre_digest::(chain_head) - .map(|digest| digest.slot()) + if let Ok(chain_head_slot) = + find_pre_digest::(chain_head).map(|digest| digest.slot()) { return strategy.should_backoff( *chain_head.number(), @@ -818,7 +836,7 @@ where self.client.info().finalized_number, slot, self.logging_target(), - ); + ) } } false @@ -833,9 +851,11 @@ where } fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)) - })) + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) } fn telemetry(&self) -> Option { @@ -865,7 +885,7 @@ pub fn find_pre_digest(header: &B::Header) -> Result = None; @@ -881,16 +901,19 @@ pub fn find_pre_digest(header: &B::Header) -> Result(header: &B::Header) - -> Result, Error> - where DigestItemFor: CompatibleDigestItem, +fn find_next_epoch_digest( + header: &B::Header, +) -> Result, Error> +where + DigestItemFor: CompatibleDigestItem, { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(_)), true) => + return Err(babe_err(Error::MultipleEpochChangeDigests)), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -900,16 +923,19 @@ fn find_next_epoch_digest(header: &B::Header) } /// Extract the BABE config change digest from the given header, if it exists. 
-fn find_next_config_digest(header: &B::Header) - -> Result, Error> - where DigestItemFor: CompatibleDigestItem, +fn find_next_config_digest( + header: &B::Header, +) -> Result, Error> +where + DigestItemFor: CompatibleDigestItem, { let mut config_digest: Option<_> = None; for log in header.digest().logs() { trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); match (log, config_digest.is_some()) { - (Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)), + (Some(ConsensusLog::NextConfigData(_)), true) => + return Err(babe_err(Error::MultipleConfigChangeDigests)), (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), _ => trace!(target: "babe", "Ignoring digest not meant for us"), } @@ -974,11 +1000,11 @@ where return Ok(()) } - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::RuntimeApi)?; + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(Error::RuntimeApi)?; if !inherent_res.ok() { for (i, e) in inherent_res.into_errors() { @@ -1003,7 +1029,7 @@ where // don't report any equivocations during initial sync // as they are most likely stale. if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()); + return Ok(()) } // check if authorship of this header is an equivocation and return a proof if so. @@ -1053,8 +1079,8 @@ where Some(proof) => proof, None => { debug!(target: "babe", "Equivocation offender is not part of the authority set."); - return Ok(()); - } + return Ok(()) + }, }, }; @@ -1074,13 +1100,8 @@ where } } -type BlockVerificationResult = Result< - ( - BlockImportParams, - Option)>>, - ), - String, ->; +type BlockVerificationResult = + Result<(BlockImportParams, Option)>>), String>; #[async_trait::async_trait] impl Verifier @@ -1129,24 +1150,26 @@ where let slot_now = create_inherent_data_providers.slot(); - let parent_header_metadata = self.client.header_metadata(parent_hash) + let parent_header_metadata = self + .client + .header_metadata(parent_hash) .map_err(Error::::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; let (check_header, epoch_descriptor) = { let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot(), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; // We add one to the current slot to allow for some small drift. 
// FIXME #1019 in the future, alter this queue to allow deferring of headers @@ -1162,20 +1185,25 @@ where match check_header { CheckedHeader::Checked(pre_header, verified_info) => { - let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() + let babe_pre_digest = verified_info + .pre_digest + .as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); let slot = babe_pre_digest.slot(); // the header is valid but let's check if there was something else already // proposed at the same slot by the given author. if there was, we will // report the equivocation to the runtime. - if let Err(err) = self.check_and_report_equivocation( - slot_now, - slot, - &header, - &verified_info.author, - &origin, - ).await { + if let Err(err) = self + .check_and_report_equivocation( + slot_now, + slot, + &header, + &verified_info.author, + &origin, + ) + .await + { warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); } @@ -1183,7 +1211,8 @@ where // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. if let Some(inner_body) = body.take() { - let mut inherent_data = create_inherent_data_providers.create_inherent_data() + let mut inherent_data = create_inherent_data_providers + .create_inherent_data() .map_err(Error::::CreateInherents)?; inherent_data.babe_replace_inherent_data(slot); let block = Block::new(pre_header.clone(), inner_body); @@ -1193,7 +1222,8 @@ where BlockId::Hash(parent_hash), inherent_data, create_inherent_data_providers, - ).await?; + ) + .await?; let (_, inner_body) = block.deconstruct(); body = Some(inner_body); @@ -1218,7 +1248,7 @@ where import_block.post_hash = Some(hash); Ok((import_block, Default::default())) - } + }, CheckedHeader::Deferred(a, b) => { debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); telemetry!( @@ -1228,7 +1258,7 @@ where "hash" => ?hash, "a" => ?a, "b" => ?b ); Err(Error::::TooFarInFuture(hash).into()) - } + }, } } } @@ -1266,22 +1296,23 @@ impl BabeBlockImport { block_import: I, config: Config, ) -> Self { - BabeBlockImport { - client, - inner: block_import, - epoch_changes, - config, - } + BabeBlockImport { client, inner: block_import, epoch_changes, config } } } #[async_trait::async_trait] -impl BlockImport for BabeBlockImport where +impl BlockImport for BabeBlockImport +where Block: BlockT, Inner: BlockImport> + Send + Sync, Inner::Error: Into, - Client: HeaderBackend + HeaderMetadata - + AuxStore + ProvideRuntimeApi + ProvideCache + Send + Sync, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + ProvideCache + + Send + + Sync, Client::Api: BabeApi + ApiExt, { type Error = ConsensusError; @@ -1308,30 +1339,33 @@ impl BlockImport for BabeBlockImport return Err(ConsensusError::ClientImport(e.to_string())), } - let pre_digest = find_pre_digest::(&block.header) - .expect("valid babe headers must contain a predigest; \ - header has been already verified; qed"); + let pre_digest = find_pre_digest::(&block.header).expect( + "valid babe headers must contain a predigest; \ + header has been already verified; qed", + ); let slot = pre_digest.slot(); let parent_hash = *block.header.parent_hash(); - let parent_header = self.client.header(BlockId::Hash(parent_hash)) + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| ConsensusError::ChainLookup(babe_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; - - let parent_slot = find_pre_digest::(&parent_header) - .map(|d| d.slot()) - .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ - header has already been verified; qed"); + .ok_or_else(|| { + ConsensusError::ChainLookup( + babe_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header).map(|d| d.slot()).expect( + "parent is non-genesis; valid BABE headers contain a pre-digest; \ + header has already been verified; qed", + ); // make sure that slot number is strictly increasing if slot <= parent_slot { - return Err( - ConsensusError::ClientImport(babe_err( - Error::::SlotMustIncrease(parent_slot, slot) - ).into()) - ); + return Err(ConsensusError::ClientImport( + babe_err(Error::::SlotMustIncrease(parent_slot, slot)).into(), + )) } // if there's a pending epoch we'll save the previous epoch changes here @@ -1354,14 +1388,16 @@ impl BlockImport for BabeBlockImport::ParentBlockNoAssociatedWeight(hash)).into() - ))? + .ok_or_else(|| { + ConsensusError::ClientImport( + babe_err(Error::::ParentBlockNoAssociatedWeight(hash)) + .into(), + ) + })? }; - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; let epoch_descriptor = intermediate.epoch_descriptor; let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); @@ -1379,27 +1415,18 @@ impl BlockImport for BabeBlockImport {}, (false, false, false) => {}, - (false, false, true) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedConfigChange).into(), - ) - ) - }, - (true, false, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), - ) - ) - }, - (false, true, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedEpochChange).into(), - ) - ) - }, + (false, false, true) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::UnexpectedConfigChange).into(), + )), + (true, false, _) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + )), + (false, true, _) => + return Err(ConsensusError::ClientImport( + babe_err(Error::::UnexpectedEpochChange).into(), + )), } let info = self.client.info(); @@ -1407,16 +1434,15 @@ impl BlockImport for BabeBlockImport::FetchEpoch(parent_hash).into()) - })?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; - let epoch_config = next_config_digest.map(Into::into).unwrap_or_else( - || viable_epoch.as_ref().config.clone() - ); + let epoch_config = next_config_digest + .map(Into::into) + .unwrap_or_else(|| viable_epoch.as_ref().config.clone()); // restrict info logging during initial sync to avoid spam let log_level = if block.origin == BlockOrigin::NetworkInitialSync { @@ -1450,43 +1476,40 @@ impl BlockImport for BabeBlockImport( - &*epoch_changes, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); + crate::aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); } - aux_schema::write_block_weight( - hash, - total_weight, - 
|values| block.auxiliary.extend( - values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ), - ); + aux_schema::write_block_weight(hash, total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); // The fork choice rule is that we pick the heaviest chain (i.e. // more primary blocks), if there's a tie we go with the longest @@ -1501,9 +1524,11 @@ impl BlockImport for BabeBlockImport last_best_weight { @@ -1544,30 +1569,38 @@ impl BlockImport for BabeBlockImport( client: Arc, epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> where +) -> Result<(), ConsensusError> +where Block: BlockT, Client: HeaderBackend + HeaderMetadata, { let info = client.info(); let finalized_slot = { - let finalized_header = client.header(BlockId::Hash(info.finalized_hash)) + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? - .expect("best finalized hash was given by client; \ - finalized headers must exist in db; qed"); + .expect( + "best finalized hash was given by client; \ + finalized headers must exist in db; qed", + ); find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; \ - valid blocks have a pre-digest; qed") + .expect( + "finalized header must be valid; \ + valid blocks have a pre-digest; qed", + ) .slot() }; - epoch_changes.prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; Ok(()) } @@ -1586,25 +1619,14 @@ where Client: AuxStore + HeaderBackend + HeaderMetadata, { let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; - let link = BabeLink { - epoch_changes: epoch_changes.clone(), - config: config.clone(), - }; + let link = BabeLink { epoch_changes: epoch_changes.clone(), config: config.clone() }; // NOTE: this isn't entirely necessary, but since we didn't use to prune the // epoch tree it is useful as a migration, so that nodes prune long trees on // startup rather than waiting until importing the next epoch change block. 
- prune_finalized( - client.clone(), - &mut epoch_changes.shared_data(), - )?; + prune_finalized(client.clone(), &mut epoch_changes.shared_data())?; - let import = BabeBlockImport::new( - client, - epoch_changes, - wrapped_block_import, - config, - ); + let import = BabeBlockImport::new(client, epoch_changes, wrapped_block_import, config); Ok((import, link)) } @@ -1629,12 +1651,23 @@ pub fn import_queue( registry: Option<&Registry>, can_author_with: CAW, telemetry: Option, -) -> ClientResult> where - Inner: BlockImport> - + Send + Sync + 'static, - Client: ProvideRuntimeApi + ProvideCache + HeaderBackend - + HeaderMetadata + AuxStore - + Send + Sync + 'static, +) -> ClientResult> +where + Inner: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, + Client: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata + + AuxStore + + Send + + Sync + + 'static, Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, @@ -1651,11 +1684,5 @@ pub fn import_queue( client, }; - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - spawner, - registry, - )) + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) } diff --git a/substrate/client/consensus/babe/src/migration.rs b/substrate/client/consensus/babe/src/migration.rs index fec73667da48d00f54647aac22d88b7f13358f5c..a248c9da24db81d5229938cef5bdbadacb0156df 100644 --- a/substrate/client/consensus/babe/src/migration.rs +++ b/substrate/client/consensus/babe/src/migration.rs @@ -16,12 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use codec::{Encode, Decode}; -use sc_consensus_epochs::Epoch as EpochT; use crate::{ - Epoch, AuthorityId, BabeAuthorityWeight, BabeGenesisConfiguration, - BabeEpochConfiguration, VRF_OUTPUT_LENGTH, NextEpochDescriptor, + AuthorityId, BabeAuthorityWeight, BabeEpochConfiguration, BabeGenesisConfiguration, Epoch, + NextEpochDescriptor, VRF_OUTPUT_LENGTH, }; +use codec::{Decode, Encode}; +use sc_consensus_epochs::Epoch as EpochT; use sp_consensus_slots::Slot; /// BABE epoch information, version 0. 
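The two migration.rs hunks around this point only reflow `EpochV0` and its `migrate` into the new rustfmt style; the logic they format is the crate's versioned aux-schema upgrade. As a reading aid, here is a minimal, self-contained sketch of that pattern under toy types — `EpochV0`, `Epoch`, `Stored`, and `CURRENT_VERSION` below are illustrative stand-ins, not the sc-consensus-babe API: read the stored version marker, decode pre-versioning data as v0 and migrate it forward using the genesis configuration, and decode current data as-is.

const CURRENT_VERSION: u32 = 2;

#[derive(Clone, Debug, PartialEq)]
struct EpochV0 {
    epoch_index: u64,
    duration: u64,
}

#[derive(Clone, Debug, PartialEq)]
struct Epoch {
    epoch_index: u64,
    duration: u64,
    // Field that did not exist in v0; filled in from genesis configuration.
    config: (u64, u64),
}

impl EpochV0 {
    // Same shape as the `EpochV0::migrate` in this file: keep the old fields
    // and inject the missing `config` from the genesis configuration.
    fn migrate(self, config: (u64, u64)) -> Epoch {
        Epoch { epoch_index: self.epoch_index, duration: self.duration, config }
    }
}

// What the aux store conceptually holds under the epoch-changes key.
enum Stored {
    V0(EpochV0),
    Current(Epoch),
}

fn load_epoch(version: Option<u32>, stored: Stored, genesis_config: (u64, u64)) -> Epoch {
    match (version, stored) {
        // No version marker: the data predates versioning, so treat it as v0
        // and migrate it forward (the aux_schema test earlier exercises this).
        (None, Stored::V0(old)) => old.migrate(genesis_config),
        // Marker matches the current version: decode as-is.
        (Some(CURRENT_VERSION), Stored::Current(epoch)) => epoch,
        _ => panic!("unsupported epoch-changes schema version"),
    }
}

fn main() {
    let migrated = load_epoch(None, Stored::V0(EpochV0 { epoch_index: 3, duration: 4 }), (3, 10));
    assert_eq!(migrated, Epoch { epoch_index: 3, duration: 4, config: (3, 10) });
}
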
@@ -43,10 +43,7 @@ impl EpochT for EpochV0 { type NextEpochDescriptor = NextEpochDescriptor; type Slot = Slot; - fn increment( - &self, - descriptor: NextEpochDescriptor - ) -> EpochV0 { + fn increment(&self, descriptor: NextEpochDescriptor) -> EpochV0 { EpochV0 { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + self.duration, @@ -74,10 +71,7 @@ impl EpochV0 { duration: self.duration, authorities: self.authorities, randomness: self.randomness, - config: BabeEpochConfiguration { - c: config.c, - allowed_slots: config.allowed_slots, - }, + config: BabeEpochConfiguration { c: config.c, allowed_slots: config.allowed_slots }, } } } diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 3392ffade98ee1de9d75a5e9f0fbc409d3ecd27d..18c016bbf10353ea9e73eb11709284e9b99ba80c 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -23,35 +23,33 @@ #![allow(deprecated)] use super::*; use authorship::claim_slot; -use sp_core::crypto::Pair; -use sp_keystore::{ - SyncCryptoStore, - vrf::make_transcript as transcript_from_data, -}; -use sp_consensus_babe::{ - AuthorityPair, Slot, AllowedSlots, make_transcript, make_transcript_data, - inherents::InherentDataProvider, -}; -use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; +use futures::executor::block_on; +use log::debug; +use rand::RngCore; +use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_client_api::{backend::TransactionFor, BlockchainEvents}; +use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; +use sc_keystore::LocalKeystore; +use sc_network::config::ProtocolConfig; +use sc_network_test::{Block as TestBlock, *}; +use sp_application_crypto::key_types::BABE; use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, DisableProofRecording, AlwaysCanAuthor, import_queue::{BoxBlockImport, BoxJustificationImport}, + AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, }; -use sc_network_test::{Block as TestBlock, *}; -use sc_network::config::ProtocolConfig; -use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; -use sc_client_api::{BlockchainEvents, backend::TransactionFor}; -use log::debug; -use std::{time::Duration, cell::RefCell, task::Poll}; -use rand::RngCore; -use rand_chacha::{ - rand_core::SeedableRng, ChaChaRng, +use sp_consensus_babe::{ + inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, + AuthorityPair, Slot, +}; +use sp_core::crypto::Pair; +use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; +use sp_runtime::{ + generic::DigestItem, + traits::{Block as BlockT, DigestFor}, }; -use sc_keystore::LocalKeystore; -use sp_application_crypto::key_types::BABE; -use futures::executor::block_on; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; +use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; @@ -95,10 +93,7 @@ impl Environment for DummyFactory { type Proposer = DummyProposer; type Error = Error; - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { - + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { let parent_slot = crate::find_pre_digest::(parent_header) .expect("parent header has a pre-digest") .slot(); @@ -113,23 +108,24 @@ impl Environment for DummyFactory { } impl DummyProposer { - fn propose_with(&mut self, pre_digests: 
DigestFor) - -> future::Ready< - Result< - Proposal< - TestBlock, - sc_client_api::TransactionFor, - () - >, - Error - > - > - { - let block_builder = self.factory.client.new_block_at( - &BlockId::Hash(self.parent_hash), - pre_digests, - false, - ).unwrap(); + fn propose_with( + &mut self, + pre_digests: DigestFor, + ) -> future::Ready< + Result< + Proposal< + TestBlock, + sc_client_api::TransactionFor, + (), + >, + Error, + >, + > { + let block_builder = self + .factory + .client + .new_block_at(&BlockId::Hash(self.parent_hash), pre_digests, false) + .unwrap(); let mut block = match block_builder.build().map_err(|e| e.into()) { Ok(b) => b.block, @@ -143,13 +139,14 @@ impl DummyProposer { // figure out if we should add a consensus digest, since the test runtime // doesn't. let epoch_changes = self.factory.epoch_changes.shared_data(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(&*self.factory.client), - &self.parent_hash, - self.parent_number, - this_slot, - |slot| Epoch::genesis(&self.factory.config, slot), - ) + let epoch = epoch_changes + .epoch_data_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| Epoch::genesis(&self.factory.config, slot), + ) .expect("client has data to find epoch") .expect("can compute epoch for baked block"); @@ -162,7 +159,8 @@ impl DummyProposer { let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { authorities: epoch.authorities.clone(), randomness: epoch.randomness.clone(), - }).encode(); + }) + .encode(); let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); block.header.digest_mut().push(digest) } @@ -176,7 +174,8 @@ impl DummyProposer { impl Proposer for DummyProposer { type Error = Error; - type Transaction = sc_client_api::TransactionFor; + type Transaction = + sc_client_api::TransactionFor; type Proposal = future::Ready, Error>>; type ProofRecording = DisableProofRecording; type Proof = (); @@ -201,9 +200,9 @@ pub struct PanickingBlockImport(B); #[async_trait::async_trait] impl> BlockImport for PanickingBlockImport - where - B::Transaction: Send, - B: Send, +where + B::Transaction: Send, + B: Send, { type Error = B::Error; type Transaction = B::Transaction; @@ -233,10 +232,8 @@ pub struct BabeTestNet { type TestHeader = ::Header; type TestExtrinsic = ::Extrinsic; -type TestSelectChain = substrate_test_runtime_client::LongestChain< - substrate_test_runtime_client::Backend, - TestBlock, ->; +type TestSelectChain = + substrate_test_runtime_client::LongestChain; pub struct TestVerifier { inner: BabeVerifier< @@ -244,11 +241,13 @@ pub struct TestVerifier { PeersFullClient, TestSelectChain, AlwaysCanAuthor, - Box> + Box< + dyn CreateInherentDataProviders< + TestBlock, + (), + InherentDataProviders = (TimestampInherentDataProvider, InherentDataProvider), + >, + >, >, mutator: Mutator, } @@ -274,7 +273,12 @@ impl Verifier for TestVerifier { pub struct PeerData { link: BabeLink, block_import: Mutex< - Option>> + Option< + BoxBlockImport< + TestBlock, + TransactionFor, + >, + >, >, } @@ -286,32 +290,27 @@ impl TestNetFactory for BabeTestNet { /// Create new test network with peers and given config. 
fn from_config(_config: &ProtocolConfig) -> Self { debug!(target: "babe", "Creating test network from config"); - BabeTestNet { - peers: Vec::new(), - } + BabeTestNet { peers: Vec::new() } } - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option, - ) - { + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option, + ) { let client = client.as_full().expect("only full clients are tested"); let config = Config::get_or_compute(&*client).expect("config available"); - let (block_import, link) = crate::block_import( - config, - client.clone(), - client.clone(), - ).expect("can initialize block-import"); + let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) + .expect("can initialize block-import"); let block_import = PanickingBlockImport(block_import); - let data_block_import = Mutex::new( - Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) - ); + let data_block_import = + Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>)); ( BlockImportAdapter::new(block_import), None, @@ -324,16 +323,16 @@ impl TestNetFactory for BabeTestNet { client: PeersClient, _cfg: &ProtocolConfig, maybe_link: &Option, - ) - -> Self::Verifier - { + ) -> Self::Verifier { use substrate_test_runtime_client::DefaultTestClientBuilderExt; let client = client.as_full().expect("only full clients are used in test"); trace!(target: "babe", "Creating a verifier"); // ensure block import and verifier are linked correctly. - let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); + let data = maybe_link + .as_ref() + .expect("babe link always provided to verifier instantiation"); let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); @@ -369,10 +368,7 @@ impl TestNetFactory for BabeTestNet { &self.peers } - fn mut_peers)>( - &mut self, - closure: F, - ) { + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -382,9 +378,7 @@ impl TestNetFactory for BabeTestNet { fn rejects_empty_block() { sp_tracing::try_init_simple(); let mut net = BabeTestNet::new(3); - let block_builder = |builder: BlockBuilder<_, _, _>| { - builder.build().unwrap().block - }; + let block_builder = |builder: BlockBuilder<_, _, _>| builder.build().unwrap().block; net.mut_peers(|peer| { peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); }) @@ -397,11 +391,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); let net = BabeTestNet::new(3); - let peers = &[ - (0, "//Alice"), - (1, "//Bob"), - (2, "//Charlie"), - ]; + let peers = &[(0, "//Alice"), (1, "//Bob"), (2, "//Charlie")]; let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); @@ -415,9 +405,10 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static let select_chain = peer.select_chain().expect("Full client has select_chain"); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)).expect("Generates authority key"); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); + SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some(seed)) 
+ .expect("Generates authority key"); keystore_paths.push(keystore_path); let mut got_own = false; @@ -435,47 +426,54 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static import_notifications.push( // run each future until we get one of our own blocks with number higher than 5 // that was produced locally. - client.import_notification_stream() - .take_while(move |n| future::ready(n.header.number() < &5 || { - if n.origin == BlockOrigin::Own { - got_own = true; - } else { - got_other = true; - } - - // continue until we have at least one block of our own - // and one of another peer. - !(got_own && got_other) - })) - .for_each(|_| future::ready(()) ) + client + .import_notification_stream() + .take_while(move |n| { + future::ready( + n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. + !(got_own && got_other) + }, + ) + }) + .for_each(|_| future::ready(())), ); + babe_futures.push( + start_babe(BabeParams { + block_import: data.block_import.lock().take().expect("import set up during init"), + select_chain, + client, + env: environ, + sync_oracle: DummyOracle, + create_inherent_data_providers: Box::new(|_, _| async { + let timestamp = TimestampInherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_duration( + *timestamp, + Duration::from_secs(6), + ); - babe_futures.push(start_babe(BabeParams { - block_import: data.block_import.lock().take().expect("import set up during init"), - select_chain, - client, - env: environ, - sync_oracle: DummyOracle, - create_inherent_data_providers: Box::new(|_, _| async { - let timestamp = TimestampInherentDataProvider::from_system_time(); - let slot = InherentDataProvider::from_timestamp_and_duration( - *timestamp, - Duration::from_secs(6), - ); - - Ok((timestamp, slot)) - }), - force_authoring: false, - backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), - babe_link: data.link.clone(), - keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, - justification_sync_link: (), - block_proposal_slot_portion: SlotProportion::new(0.5), - max_block_proposal_slot_portion: None, - telemetry: None, - }).expect("Starts babe")); + Ok((timestamp, slot)) + }), + force_authoring: false, + backoff_authoring_blocks: Some(BackoffAuthoringOnFinalizedHeadLagging::default()), + babe_link: data.link.clone(), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + justification_sync_link: (), + block_proposal_slot_portion: SlotProportion::new(0.5), + max_block_proposal_slot_portion: None, + telemetry: None, + }) + .expect("Starts babe"), + ); } block_on(future::select( futures::future::poll_fn(move |cx| { @@ -489,7 +487,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static Poll::<()>::Pending }), - future::select(future::join_all(import_notifications), future::join_all(babe_futures)) + future::select(future::join_all(import_notifications), future::join_all(babe_futures)), )); } @@ -503,7 +501,8 @@ fn authoring_blocks() { fn rejects_missing_inherent_digest() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) .collect() }) @@ -514,7 +513,8 @@ fn 
rejects_missing_inherent_digest() { fn rejects_missing_seals() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) .collect() }) @@ -525,7 +525,8 @@ fn rejects_missing_seals() { fn rejects_missing_consensus_digests() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); - header.digest_mut().logs = v.into_iter() + header.digest_mut().logs = v + .into_iter() .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) .collect() }); @@ -560,8 +561,8 @@ fn sig_is_not_pre_digest() { fn can_author_block() { sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) .expect("Generates authority pair"); @@ -601,8 +602,8 @@ fn can_author_block() { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); - break; - } + break + }, } } } @@ -622,26 +623,27 @@ fn propose_and_import_block( }); let pre_digest = sp_runtime::generic::Digest { - logs: vec![ - Item::babe_pre_digest( - PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - authority_index: 0, - slot, - }), - ), - ], + logs: vec![Item::babe_pre_digest(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + authority_index: 0, + slot, + }))], }; let parent_hash = parent.hash(); let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch_descriptor = proposer_factory.epoch_changes.shared_data().epoch_descriptor_for_child_of( - descendent_query(&*proposer_factory.client), - &parent_hash, - *parent.number(), - slot, - ).unwrap().unwrap(); + let epoch_descriptor = proposer_factory + .epoch_changes + .shared_data() + .epoch_descriptor_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot, + ) + .unwrap() + .unwrap(); let seal = { // sign the pre-sealed hash of the block and then @@ -706,13 +708,12 @@ fn importing_block_one_sets_genesis_epoch() { let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); let epoch_changes = data.link.epoch_changes.shared_data(); - let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( - descendent_query(&*client), - &block_hash, - 1, - 1000.into(), - |slot| Epoch::genesis(&data.link.config, slot), - ).unwrap().unwrap(); + let epoch_for_second_block = epoch_changes + .epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000.into(), |slot| { + Epoch::genesis(&data.link.config, slot) + }) + .unwrap() + .unwrap(); assert_eq!(epoch_for_second_block, genesis_epoch); } @@ -779,16 +780,10 @@ fn importing_epoch_change_block_prunes_tree() { let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); // We should be tracking a total of 9 epochs in the fork tree - assert_eq!( - epoch_changes.shared_data().tree().iter().count(), - 9, - ); + assert_eq!(epoch_changes.shared_data().tree().iter().count(), 9,); // And only one root - assert_eq!( - epoch_changes.shared_data().tree().roots().count(), - 1, - ); + 
assert_eq!(epoch_changes.shared_data().tree().roots().count(), 1,); // We finalize block #13 from the canon chain, so on the next epoch // change the tree should be pruned, to not contain F (#7). @@ -796,32 +791,47 @@ fn importing_epoch_change_block_prunes_tree() { propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); // at this point no hashes from the first fork must exist on the tree - assert!( - !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), - ); + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_1.contains(h)),); // but the epoch changes from the other forks must still exist - assert!( - epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) - ); - - assert!( - epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h))); + + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); // finalizing block #25 from the canon chain should prune out the second fork client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); // at this point no hashes from the second fork must exist on the tree - assert!( - !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), - ); + assert!(!epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h)),); // while epoch changes from the last fork should still exist - assert!( - epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); + assert!(epoch_changes + .shared_data() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); } #[test] @@ -856,20 +866,15 @@ fn verify_slots_are_strictly_increasing() { // we should fail to import this block since the slot number didn't increase. // we will panic due to the `PanickingBlockImport` defined above. 
- propose_and_import_block( - &b1, - Some(999.into()), - &mut proposer_factory, - &mut block_import, - ); + propose_and_import_block(&b1, Some(999.into()), &mut proposer_factory, &mut block_import); } #[test] fn babe_transcript_generation_match() { sp_tracing::try_init_simple(); let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore: SyncCryptoStorePtr = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); let public = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) .expect("Generates authority pair"); @@ -890,9 +895,7 @@ fn babe_transcript_generation_match() { let test = |t: merlin::Transcript| -> [u8; 16] { let mut b = [0u8; 16]; - t.build_rng() - .finalize(&mut ChaChaRng::from_seed([0u8;32])) - .fill_bytes(&mut b); + t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b); b }; debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript))); diff --git a/substrate/client/consensus/babe/src/verification.rs b/substrate/client/consensus/babe/src/verification.rs index 469286f5110d73160d3782cb35c403226bd084c0..af118312dd07c5b039b3d28cc00e6e229bc33fad 100644 --- a/substrate/client/consensus/babe/src/verification.rs +++ b/substrate/client/consensus/babe/src/verification.rs @@ -17,18 +17,22 @@ // along with this program. If not, see . //! Verification for BABE headers. -use sp_runtime::{traits::Header, traits::DigestItemFor}; -use sp_core::{Pair, Public}; -use sp_consensus_babe::{make_transcript, AuthoritySignature, AuthorityPair, AuthorityId}; -use sp_consensus_babe::digests::{ - PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, - CompatibleDigestItem +use super::{ + authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}, + babe_err, find_pre_digest, BlockT, Epoch, Error, }; +use log::{debug, trace}; use sc_consensus_slots::CheckedHeader; +use sp_consensus_babe::{ + digests::{ + CompatibleDigestItem, PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, + SecondaryVRFPreDigest, + }, + make_transcript, AuthorityId, AuthorityPair, AuthoritySignature, +}; use sp_consensus_slots::Slot; -use log::{debug, trace}; -use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; -use super::authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; +use sp_core::{Pair, Public}; +use sp_runtime::traits::{DigestItemFor, Header}; /// BABE verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { @@ -57,26 +61,24 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// with each having different validation logic. 
pub(super) fn check_header( params: VerificationParams, -) -> Result>, Error> where +) -> Result>, Error> +where DigestItemFor: CompatibleDigestItem, { - let VerificationParams { - mut header, - pre_digest, - slot_now, - epoch, - } = params; + let VerificationParams { mut header, pre_digest, slot_now, epoch } = params; let authorities = &epoch.authorities; let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; trace!(target: "babe", "Checking header"); - let seal = header.digest_mut().pop() + let seal = header + .digest_mut() + .pop() .ok_or_else(|| babe_err(Error::HeaderUnsealed(header.hash())))?; - let sig = seal.as_babe_seal().ok_or_else(|| { - babe_err(Error::HeaderBadSeal(header.hash())) - })?; + let sig = seal + .as_babe_seal() + .ok_or_else(|| babe_err(Error::HeaderBadSeal(header.hash())))?; // the pre-hash of the header doesn't include the seal // and that's what we sign @@ -84,7 +86,7 @@ pub(super) fn check_header( if pre_digest.slot() > slot_now { header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot())); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot())) } let author = match authorities.get(pre_digest.authority_index() as usize) { @@ -100,45 +102,31 @@ pub(super) fn check_header( primary.slot, ); - check_primary_header::( - pre_hash, - primary, - sig, - &epoch, - epoch.config.c, - )?; + check_primary_header::(pre_hash, primary, sig, &epoch, epoch.config.c)?; }, - PreDigest::SecondaryPlain(secondary) if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => { + PreDigest::SecondaryPlain(secondary) + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => + { debug!(target: "babe", "Verifying secondary plain block #{} at slot: {}", header.number(), secondary.slot, ); - check_secondary_plain_header::( - pre_hash, - secondary, - sig, - &epoch, - )?; - }, - PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { + check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; + } + PreDigest::SecondaryVRF(secondary) + if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => + { debug!(target: "babe", "Verifying secondary VRF block #{} at slot: {}", header.number(), secondary.slot, ); - check_secondary_vrf_header::( - pre_hash, - secondary, - sig, - &epoch, - )?; - }, - _ => { - return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); + check_secondary_vrf_header::(pre_hash, secondary, sig, &epoch)?; } + _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } let info = VerifiedHeaderInfo { @@ -170,27 +158,20 @@ fn check_primary_header( if AuthorityPair::verify(&signature, pre_hash, &author) { let (inout, _) = { - let transcript = make_transcript( - &epoch.randomness, - pre_digest.slot, - epoch.epoch_index, - ); + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })? + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| { + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) + }) + .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))? 
}; - let threshold = calculate_primary_threshold( - c, - &epoch.authorities, - pre_digest.authority_index as usize, - ); + let threshold = + calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))) } Ok(()) @@ -211,16 +192,14 @@ fn check_secondary_plain_header( ) -> Result<(), Error> { // check the signature is valid under the expected authority and // chain state. - let expected_author = secondary_slot_author( - pre_digest.slot, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + let expected_author = + secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) + .ok_or_else(|| Error::NoSecondaryAuthorExpected)?; let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { @@ -239,30 +218,22 @@ fn check_secondary_vrf_header( ) -> Result<(), Error> { // check the signature is valid under the expected authority and // chain state. - let expected_author = secondary_slot_author( - pre_digest.slot, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + let expected_author = + secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) + .ok_or_else(|| Error::NoSecondaryAuthorExpected)?; let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) } if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - let transcript = make_transcript( - &epoch.randomness, - pre_digest.slot, - epoch.epoch_index, - ); - - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })?; + let transcript = make_transcript(&epoch.randomness, pre_digest.slot, epoch.epoch_index); + + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof)) + .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))?; Ok(()) } else { diff --git a/substrate/client/consensus/common/src/longest_chain.rs b/substrate/client/consensus/common/src/longest_chain.rs index e1fbb600fa44f1a22fbe18a7e3e5ef7fae8f991e..b1f7f94f9eb28da484bf7e895c9d956858b8572d 100644 --- a/substrate/client/consensus/common/src/longest_chain.rs +++ b/substrate/client/consensus/common/src/longest_chain.rs @@ -18,30 +18,26 @@ //! 
Longest chain implementation -use std::sync::Arc; -use std::marker::PhantomData; use sc_client_api::backend; -use sp_consensus::{SelectChain, Error as ConsensusError}; use sp_blockchain::{Backend, HeaderBackend}; +use sp_consensus::{Error as ConsensusError, SelectChain}; use sp_runtime::{ - traits::{NumberFor, Block as BlockT}, generic::BlockId, + traits::{Block as BlockT, NumberFor}, }; +use std::{marker::PhantomData, sync::Arc}; /// Implement Longest Chain Select implementation /// where 'longest' is defined as the highest number of blocks pub struct LongestChain { backend: Arc, - _phantom: PhantomData + _phantom: PhantomData, } impl Clone for LongestChain { fn clone(&self) -> Self { let backend = self.backend.clone(); - LongestChain { - backend, - _phantom: Default::default() - } + LongestChain { backend, _phantom: Default::default() } } } @@ -52,21 +48,22 @@ where { /// Instantiate a new LongestChain for Backend B pub fn new(backend: Arc) -> Self { - LongestChain { - backend, - _phantom: Default::default(), - } + LongestChain { backend, _phantom: Default::default() } } fn best_block_header(&self) -> sp_blockchain::Result<::Header> { let info = self.backend.blockchain().info(); let import_lock = self.backend.get_import_lock(); - let best_hash = self.backend + let best_hash = self + .backend .blockchain() .best_containing(info.best_hash, None, import_lock)? .unwrap_or(info.best_hash); - Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))? + Ok(self + .backend + .blockchain() + .header(BlockId::Hash(best_hash))? .expect("given block hash was fetched from block in db; qed")) } diff --git a/substrate/client/consensus/common/src/shared_data.rs b/substrate/client/consensus/common/src/shared_data.rs index 8132a42a4b9296bb2c9c3f77b6915a3124b50743..e1797bc6f517ea344eb4ae9cd2b0dea124b174ce 100644 --- a/substrate/client/consensus/common/src/shared_data.rs +++ b/substrate/client/consensus/common/src/shared_data.rs @@ -18,8 +18,8 @@ //! Provides a generic wrapper around shared data. See [`SharedData`] for more information. +use parking_lot::{Condvar, MappedMutexGuard, Mutex, MutexGuard}; use std::sync::Arc; -use parking_lot::{Mutex, MappedMutexGuard, Condvar, MutexGuard}; /// Created by [`SharedDataLocked::release_mutex`]. /// @@ -75,8 +75,7 @@ impl<'a, T> SharedDataLocked<'a, T> { /// Release the mutex, but keep the shared data locked. 
pub fn release_mutex(mut self) -> SharedDataLockedUpgradable { SharedDataLockedUpgradable { - shared_data: self.shared_data.take() - .expect("`shared_data` is only taken on drop; qed"), + shared_data: self.shared_data.take().expect("`shared_data` is only taken on drop; qed"), } } } @@ -132,7 +131,7 @@ struct SharedDataInner { /// # Example /// /// ``` -///# use sc_consensus::shared_data::SharedData; +/// # use sc_consensus::shared_data::SharedData; /// /// let shared_data = SharedData::new(String::from("hello world")); /// @@ -174,10 +173,7 @@ pub struct SharedData { impl Clone for SharedData { fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - cond_var: self.cond_var.clone(), - } + Self { inner: self.inner.clone(), cond_var: self.cond_var.clone() } } } @@ -228,10 +224,7 @@ impl SharedData { debug_assert!(!guard.locked); guard.locked = true; - SharedDataLocked { - inner: guard, - shared_data: Some(self.clone()), - } + SharedDataLocked { inner: guard, shared_data: Some(self.clone()) } } } diff --git a/substrate/client/consensus/epochs/src/lib.rs b/substrate/client/consensus/epochs/src/lib.rs index 98a3e83530510fc8fe695a56d19be69ead0052fb..e93724e5895f23c6540eacf0b4307a5a7365b85b 100644 --- a/substrate/client/consensus/epochs/src/lib.rs +++ b/substrate/client/consensus/epochs/src/lib.rs @@ -20,12 +20,16 @@ pub mod migration; -use std::{ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; use sc_client_api::utils::is_descendent_of; -use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; +use std::{ + borrow::{Borrow, BorrowMut}, + collections::BTreeMap, + ops::Add, +}; /// A builder for `is_descendent_of` functions. pub trait IsDescendentOfBuilder { @@ -41,8 +45,7 @@ pub trait IsDescendentOfBuilder { /// details aren't yet stored, but its parent is. /// /// The format of `current` when `Some` is `(current, current_parent)`. - fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) - -> Self::IsDescendentOf; + fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) -> Self::IsDescendentOf; } /// Produce a descendent query object given the client. 
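The `IsDescendentOfBuilder` reflow in the surrounding hunks concerns a small but central contract: given two hashes, answer "is the second a descendant of the first?" against the header backend, with an optional `(current, current_parent)` pair standing in for a block that is mid-import and not yet in the database. A self-contained sketch of that contract over a toy parent map (the types and the genesis/self-descendant conventions below are illustrative, not sp-blockchain's exact semantics):

use std::collections::HashMap;

type Hash = u32;

// Build a checker over a child -> parent map; `current` mirrors the trait's
// `(current, current_parent)` escape hatch for a block not yet stored.
fn build_is_descendent_of(
    parents: HashMap<Hash, Hash>,
    current: Option<(Hash, Hash)>,
) -> impl Fn(&Hash, &Hash) -> Result<bool, String> {
    move |base, head| {
        // A block is not considered its own descendant.
        if base == head {
            return Ok(false)
        }
        let mut at = *head;
        loop {
            // Prefer the in-flight `current` link, then fall back to the map.
            let parent = match current {
                Some((cur, cur_parent)) if cur == at => cur_parent,
                _ => match parents.get(&at) {
                    Some(p) => *p,
                    // Walked past genesis (or hit an unknown block).
                    None => return Ok(false),
                },
            };
            if parent == *base {
                return Ok(true)
            }
            at = parent;
        }
    }
}

fn main() {
    // Chain: 0 (genesis) -> 1 -> 2, with block 3 being imported on top of 2.
    let parents = HashMap::from([(1, 0), (2, 1)]);
    let is_descendent_of = build_is_descendent_of(parents, Some((3, 2)));
    assert_eq!(is_descendent_of(&0, &3), Ok(true));
    assert_eq!(is_descendent_of(&3, &2), Ok(false));
}
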
@@ -55,16 +58,18 @@ pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder< pub struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); impl<'a, H, Block> IsDescendentOfBuilder - for HeaderBackendDescendentBuilder<&'a H, Block> where - H: HeaderBackend + HeaderMetadata, + for HeaderBackendDescendentBuilder<&'a H, Block> +where + H: HeaderBackend + HeaderMetadata, Block: BlockT, { type Error = ClientError; type IsDescendentOf = Box Result + 'a>; - fn build_is_descendent_of(&self, current: Option<(Block::Hash, Block::Hash)>) - -> Self::IsDescendentOf - { + fn build_is_descendent_of( + &self, + current: Option<(Block::Hash, Block::Hash)>, + ) -> Self::IsDescendentOf { Box::new(is_descendent_of(self.0, current)) } } @@ -90,10 +95,7 @@ pub trait Epoch { impl<'a, E: Epoch> From<&'a E> for EpochHeader { fn from(epoch: &'a E) -> EpochHeader { - Self { - start_slot: epoch.start_slot(), - end_slot: epoch.end_slot(), - } + Self { start_slot: epoch.start_slot(), end_slot: epoch.end_slot() } } } @@ -109,10 +111,7 @@ pub struct EpochHeader { impl Clone for EpochHeader { fn clone(&self) -> Self { - Self { - start_slot: self.start_slot, - end_slot: self.end_slot, - } + Self { start_slot: self.start_slot, end_slot: self.end_slot } } } @@ -149,7 +148,8 @@ pub enum ViableEpoch { Signaled(ERef), } -impl AsRef for ViableEpoch where +impl AsRef for ViableEpoch +where ERef: Borrow, { fn as_ref(&self) -> &E { @@ -160,7 +160,8 @@ impl AsRef for ViableEpoch where } } -impl AsMut for ViableEpoch where +impl AsMut for ViableEpoch +where ERef: BorrowMut, { fn as_mut(&mut self) -> &mut E { @@ -171,7 +172,8 @@ impl AsMut for ViableEpoch where } } -impl ViableEpoch where +impl ViableEpoch +where E: Epoch + Clone, ERef: Borrow, { @@ -187,18 +189,14 @@ impl ViableEpoch where /// Get cloned value for the viable epoch. pub fn into_cloned(self) -> ViableEpoch { match self { - ViableEpoch::UnimportedGenesis(e) => - ViableEpoch::UnimportedGenesis(e), + ViableEpoch::UnimportedGenesis(e) => ViableEpoch::UnimportedGenesis(e), ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), } } /// Increment the epoch, yielding an `IncrementedEpoch` to be imported /// into the fork-tree. - pub fn increment( - &self, - next_descriptor: E::NextEpochDescriptor - ) -> IncrementedEpoch { + pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { ViableEpoch::UnimportedGenesis(ref epoch_0) => @@ -216,7 +214,7 @@ pub enum ViableEpochDescriptor { /// The epoch is an unimported genesis, with given start slot number. UnimportedGenesis(E::Slot), /// The epoch is signaled and has been imported, with given identifier and header. 
- Signaled(EpochIdentifier, EpochHeader) + Signaled(EpochIdentifier, EpochHeader), } impl ViableEpochDescriptor { @@ -243,8 +241,7 @@ impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { match epoch { PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), - PersistedEpoch::Regular(ref epoch_n) => - PersistedEpochHeader::Regular(epoch_n.into()), + PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()), } } } @@ -312,7 +309,8 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } -impl Default for EpochChanges where +impl Default for EpochChanges +where Hash: PartialEq + Ord, Number: Ord, { @@ -321,9 +319,10 @@ impl Default for EpochChanges where } } -impl EpochChanges where +impl EpochChanges +where Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Copy, + Number: Ord + One + Zero + Add + Copy, { /// Create a new epoch change. pub fn new() -> Self { @@ -337,51 +336,38 @@ impl EpochChanges where } /// Map the epoch changes from one storing data to a different one. - pub fn map(self, mut f: F) -> EpochChanges where - B: Epoch, + pub fn map(self, mut f: F) -> EpochChanges + where + B: Epoch, F: FnMut(&Hash, &Number, E) -> B, { EpochChanges { - inner: self.inner.map(&mut |_, _, header| { - match header { - PersistedEpochHeader::Genesis(epoch_0, epoch_1) => { - PersistedEpochHeader::Genesis( - EpochHeader { - start_slot: epoch_0.start_slot, - end_slot: epoch_0.end_slot, - }, - EpochHeader { - start_slot: epoch_1.start_slot, - end_slot: epoch_1.end_slot, - }, - ) - }, - PersistedEpochHeader::Regular(epoch_n) => { - PersistedEpochHeader::Regular( - EpochHeader { - start_slot: epoch_n.start_slot, - end_slot: epoch_n.end_slot, - }, - ) - }, - } + inner: self.inner.map(&mut |_, _, header| match header { + PersistedEpochHeader::Genesis(epoch_0, epoch_1) => PersistedEpochHeader::Genesis( + EpochHeader { start_slot: epoch_0.start_slot, end_slot: epoch_0.end_slot }, + EpochHeader { start_slot: epoch_1.start_slot, end_slot: epoch_1.end_slot }, + ), + PersistedEpochHeader::Regular(epoch_n) => + PersistedEpochHeader::Regular(EpochHeader { + start_slot: epoch_n.start_slot, + end_slot: epoch_n.end_slot, + }), }), - epochs: self.epochs.into_iter().map(|((hash, number), epoch)| { - let bepoch = match epoch { - PersistedEpoch::Genesis(epoch_0, epoch_1) => { - PersistedEpoch::Genesis( + epochs: self + .epochs + .into_iter() + .map(|((hash, number), epoch)| { + let bepoch = match epoch { + PersistedEpoch::Genesis(epoch_0, epoch_1) => PersistedEpoch::Genesis( f(&hash, &number, epoch_0), f(&hash, &number, epoch_1), - ) - }, - PersistedEpoch::Regular(epoch_n) => { - PersistedEpoch::Regular( - f(&hash, &number, epoch_n) - ) - }, - }; - ((hash, number), bepoch) - }).collect(), + ), + PersistedEpoch::Regular(epoch_n) => + PersistedEpoch::Regular(f(&hash, &number, epoch_n)), + }; + ((hash, number), bepoch) + }) + .collect(), } } @@ -395,25 +381,17 @@ impl EpochChanges where number: Number, slot: E::Slot, ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(None); + let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot, - PersistedEpochHeader::Regular(ref epoch_n) => - slot >= epoch_n.end_slot, + PersistedEpochHeader::Genesis(_, ref epoch_1) 
=> slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => slot >= epoch_n.end_slot, }; // prune any epochs which could not be _live_ as of the children of the // finalized block, i.e. re-root the fork tree to the oldest ancestor of // (hash, number) where epoch.end_slot() >= finalized_slot - let removed = self.inner.prune( - hash, - &number, - &is_descendent_of, - &predicate, - )?; + let removed = self.inner.prune(hash, &number, &is_descendent_of, &predicate)?; for (hash, number, _) in removed { self.epochs.remove(&(hash, number)); @@ -424,18 +402,18 @@ impl EpochChanges where /// Get a reference to an epoch with given identifier. pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { - self.epochs.get(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) + self.epochs.get(&(id.hash, id.number)).and_then(|v| match v { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + Some(epoch_0), + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + Some(epoch_1), + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => + Some(epoch_n), + _ => None, + }) } /// Get a reference to a viable epoch with given descriptor. @@ -443,33 +421,32 @@ impl EpochChanges where &self, descriptor: &ViableEpochDescriptor, make_genesis: G, - ) -> Option> where - G: FnOnce(E::Slot) -> E + ) -> Option> + where + G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch(&identifier).map(ViableEpoch::Signaled), } } /// Get a mutable reference to an epoch with given identifier. pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { - self.epochs.get_mut(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref mut epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref mut epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref mut epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) + self.epochs.get_mut(&(id.hash, id.number)).and_then(|v| match v { + PersistedEpoch::Genesis(ref mut epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + Some(epoch_0), + PersistedEpoch::Genesis(_, ref mut epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + Some(epoch_1), + PersistedEpoch::Regular(ref mut epoch_n) + if id.position == EpochIdentifierPosition::Regular => + Some(epoch_n), + _ => None, + }) } /// Get a mutable reference to a viable epoch with given descriptor. 
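Putting these pieces together, the typical insertion flow (condensed from the `epoch_changes_between_blocks` test below, using the test module's `Epoch` type and a closure-based `is_descendent_of`) looks like this:

```rust
// Condensed from the tests below; `Epoch`, `is_descendent_of`, and the slot
// values are the test module's stand-ins, not production types.
let make_genesis = |slot| Epoch { start_slot: slot, duration: 100 };
let mut epoch_changes = EpochChanges::<_, _, Epoch>::new();

// 1. Resolve the epoch descriptor for a child of the genesis block "0".
let descriptor = epoch_changes
	.epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100)
	.unwrap()
	.unwrap();

// 2. Materialize the viable epoch and increment it into its successor.
let incremented = epoch_changes
	.viable_epoch(&descriptor, &make_genesis)
	.unwrap()
	.increment(());

// 3. Import the incremented epoch into the fork tree under block "A".
epoch_changes.import(&is_descendent_of, *b"A", 1, *b"0", incremented).unwrap();
```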
@@ -477,16 +454,15 @@ impl EpochChanges where &mut self, descriptor: &ViableEpochDescriptor, make_genesis: G, - ) -> Option> where - G: FnOnce(E::Slot) -> E + ) -> Option> + where + G: FnOnce(E::Slot) -> E, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch_mut(&identifier).map(ViableEpoch::Signaled) - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot))), + ViableEpochDescriptor::Signaled(identifier, _) => + self.epoch_mut(&identifier).map(ViableEpoch::Signaled), } } @@ -497,18 +473,15 @@ impl EpochChanges where pub fn epoch_data( &self, descriptor: &ViableEpochDescriptor, - make_genesis: G - ) -> Option where + make_genesis: G, + ) -> Option + where G: FnOnce(E::Slot) -> E, E: Clone, { match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot) => { - Some(make_genesis(*slot)) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).cloned() - }, + ViableEpochDescriptor::UnimportedGenesis(slot) => Some(make_genesis(*slot)), + ViableEpochDescriptor::Signaled(identifier, _) => self.epoch(&identifier).cloned(), } } @@ -524,7 +497,8 @@ impl EpochChanges where parent_number: Number, slot: E::Slot, make_genesis: G, - ) -> Result, fork_tree::Error> where + ) -> Result, fork_tree::Error> + where G: FnOnce(E::Slot) -> E, E: Clone, { @@ -532,7 +506,7 @@ impl EpochChanges where descendent_of_builder, parent_hash, parent_number, - slot + slot, )?; Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) @@ -555,8 +529,8 @@ impl EpochChanges where // "descends" from our parent-hash. let fake_head_hash = fake_head_hash(parent_hash); - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((fake_head_hash, *parent_hash))); if parent_number == Zero::zero() { // need to insert the genesis epoch. @@ -569,37 +543,41 @@ impl EpochChanges where // at epoch_1 -- all we're doing here is figuring out which node // we need. let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot, - PersistedEpochHeader::Regular(ref epoch_n) => - epoch_n.start_slot <= slot, + PersistedEpochHeader::Genesis(ref epoch_0, _) => epoch_0.start_slot <= slot, + PersistedEpochHeader::Regular(ref epoch_n) => epoch_n.start_slot <= slot, }; - self.inner.find_node_where( - &fake_head_hash, - &(parent_number + One::one()), - &is_descendent_of, - &predicate, - ) + self.inner + .find_node_where( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) .map(|n| { - n.map(|node| (match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot { - (EpochIdentifierPosition::Genesis1, epoch_1.clone()) - } else { - (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + n.map(|node| { + ( + match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. 
+ PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot <= slot { + (EpochIdentifierPosition::Genesis1, epoch_1.clone()) + } else { + (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + }, + PersistedEpochHeader::Regular(ref epoch_n) => + (EpochIdentifierPosition::Regular, epoch_n.clone()), }, - PersistedEpochHeader::Regular(ref epoch_n) => - (EpochIdentifierPosition::Regular, epoch_n.clone()), - }, node)).map(|((position, header), node)| { - ViableEpochDescriptor::Signaled(EpochIdentifier { - position, - hash: node.hash, - number: node.number - }, header) + node, + ) + }) + .map(|((position, header), node)| { + ViableEpochDescriptor::Signaled( + EpochIdentifier { position, hash: node.hash, number: node.number }, + header, + ) }) }) } @@ -617,16 +595,11 @@ impl EpochChanges where parent_hash: Hash, epoch: IncrementedEpoch, ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((hash, parent_hash))); + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((hash, parent_hash))); let header = PersistedEpochHeader::::from(&epoch.0); - let res = self.inner.import( - hash, - number, - header, - &is_descendent_of, - ); + let res = self.inner.import(hash, number, header, &is_descendent_of); match res { Ok(_) | Err(fork_tree::Error::Duplicate) => { @@ -653,8 +626,7 @@ pub type SharedEpochChanges = #[cfg(test)] mod tests { - use super::*; - use super::Epoch as EpochT; + use super::{Epoch as EpochT, *}; #[derive(Debug, PartialEq)] pub struct TestError; @@ -667,15 +639,14 @@ mod tests { impl std::error::Error for TestError {} - impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F - where F: Fn(&H, &H) -> Result + impl<'a, F: 'a, H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F + where + F: Fn(&H, &H) -> Result, { type Error = TestError; type IsDescendentOf = Box Result + 'a>; - fn build_is_descendent_of(&self, current: Option<(H, H)>) - -> Self::IsDescendentOf - { + fn build_is_descendent_of(&self, current: Option<(H, H)>) -> Self::IsDescendentOf { let f = *self; Box::new(move |base, head| { let mut head = head; @@ -683,7 +654,7 @@ mod tests { if let Some((ref c_head, ref c_parent)) = current { if head == c_head { if base == c_parent { - return Ok(true); + return Ok(true) } else { head = c_parent; } @@ -709,10 +680,7 @@ mod tests { type Slot = Slot; fn increment(&self, _: ()) -> Self { - Epoch { - start_slot: self.start_slot + self.duration, - duration: self.duration, - } + Epoch { start_slot: self.start_slot + self.duration, duration: self.duration } } fn end_slot(&self) -> Slot { @@ -726,7 +694,6 @@ mod tests { #[test] fn genesis_epoch_is_created_but_not_imported() { - // // A - B // \ // — C @@ -741,12 +708,10 @@ mod tests { }; let epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10101, - ).unwrap().unwrap(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10101) + .unwrap() + .unwrap(); match genesis_epoch { ViableEpochDescriptor::UnimportedGenesis(slot) => { @@ -755,12 +720,10 @@ mod tests { _ => panic!("should be unimported genesis"), }; - let genesis_epoch_2 = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10102, - ).unwrap().unwrap(); + let genesis_epoch_2 = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10102) + 
.unwrap() + .unwrap(); match genesis_epoch_2 { ViableEpochDescriptor::UnimportedGenesis(slot) => { @@ -772,7 +735,6 @@ mod tests { #[test] fn epoch_changes_between_blocks() { - // // A - B // \ // — C @@ -786,34 +748,23 @@ mod tests { } }; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration: 100, - }; + let make_genesis = |slot| Epoch { start_slot: slot, duration: 100 }; let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - let import_epoch_1 = epoch_changes - .viable_epoch(&genesis_epoch, &make_genesis) - .unwrap() - .increment(()); + let import_epoch_1 = + epoch_changes.viable_epoch(&genesis_epoch, &make_genesis).unwrap().increment(()); let epoch_1 = import_epoch_1.as_ref().clone(); - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - import_epoch_1, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", import_epoch_1) + .unwrap(); let genesis_epoch = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); assert!(is_descendent_of(b"0", b"A").unwrap()); @@ -823,13 +774,10 @@ mod tests { { // x is still within the genesis epoch. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot - 1, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot - 1, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(x, genesis_epoch); } @@ -837,13 +785,10 @@ mod tests { { // x is now at the next epoch, because the block is now at the // start slot of epoch 1. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(x, epoch_1); } @@ -851,13 +796,16 @@ mod tests { { // x is now at the next epoch, because the block is now after // start slot of epoch 1. 
- let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - epoch_1.end_slot() - 1, - &make_genesis, - ).unwrap().unwrap(); + let x = epoch_changes + .epoch_data_for_child_of( + &is_descendent_of, + b"A", + 1, + epoch_1.end_slot() - 1, + &make_genesis, + ) + .unwrap() + .unwrap(); assert_eq!(x, epoch_1); } @@ -880,90 +828,65 @@ mod tests { let duration = 100; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration, - }; + let make_genesis = |slot| Epoch { start_slot: slot, duration }; let mut epoch_changes = EpochChanges::new(); let next_descriptor = (); // insert genesis epoch for A { - let genesis_epoch_a_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) .unwrap() .increment(next_descriptor.clone()); - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - incremented_epoch, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) + .unwrap(); } // insert genesis epoch for X { - let genesis_epoch_x_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 1000, - ).unwrap().unwrap(); + let genesis_epoch_x_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1000) + .unwrap() + .unwrap(); let incremented_epoch = epoch_changes .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) .unwrap() .increment(next_descriptor.clone()); - epoch_changes.import( - &is_descendent_of, - *b"X", - 1, - *b"0", - incremented_epoch, - ).unwrap(); + epoch_changes + .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) + .unwrap(); } // now check that the genesis epochs for our respective block 1s // respect the chain structure. { - let epoch_for_a_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - 101, - &make_genesis, - ).unwrap().unwrap(); + let epoch_for_a_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 101, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(epoch_for_a_child, make_genesis(100)); - let epoch_for_x_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 1001, - &make_genesis, - ).unwrap().unwrap(); + let epoch_for_x_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 1001, &make_genesis) + .unwrap() + .unwrap(); assert_eq!(epoch_for_x_child, make_genesis(1000)); - let epoch_for_x_child_before_genesis = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 101, - &make_genesis, - ).unwrap(); + let epoch_for_x_child_before_genesis = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 101, &make_genesis) + .unwrap(); // even though there is a genesis epoch at that slot, it's not in // this chain. diff --git a/substrate/client/consensus/epochs/src/migration.rs b/substrate/client/consensus/epochs/src/migration.rs index 6e7baba8053af65bfa06cd983b675b90e851d870..49e08240df8c3ccbd6a1a3c59a7c58cc1406216a 100644 --- a/substrate/client/consensus/epochs/src/migration.rs +++ b/substrate/client/consensus/epochs/src/migration.rs @@ -18,11 +18,11 @@ //! Migration types for epoch changes. 
-use std::collections::BTreeMap; -use codec::{Encode, Decode}; +use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use std::collections::BTreeMap; /// Legacy definition of epoch changes. #[derive(Clone, Encode, Decode)] @@ -31,9 +31,11 @@ pub struct EpochChangesV0 { } /// Type alias for legacy definition of epoch changes. -pub type EpochChangesForV0 = EpochChangesV0<::Hash, NumberFor, Epoch>; +pub type EpochChangesForV0 = + EpochChangesV0<::Hash, NumberFor, Epoch>; -impl EpochChangesV0 where +impl EpochChangesV0 +where Hash: PartialEq + Ord + Copy, Number: Ord + Copy, { diff --git a/substrate/client/consensus/manual-seal/src/consensus.rs b/substrate/client/consensus/manual-seal/src/consensus.rs index 0cfd99cab5c99419c219b2a6483efa2a30299780..1f7ee413b71d0d24f955c2f43ca183e11a9259dc 100644 --- a/substrate/client/consensus/manual-seal/src/consensus.rs +++ b/substrate/client/consensus/manual-seal/src/consensus.rs @@ -19,26 +19,30 @@ //! Extensions for manual seal to produce blocks valid for any runtime. use super::Error; -use sp_runtime::traits::{Block as BlockT, DigestFor}; -use sp_inherents::InherentData; use sp_consensus::BlockImportParams; +use sp_inherents::InherentData; +use sp_runtime::traits::{Block as BlockT, DigestFor}; pub mod babe; -/// Consensus data provider, manual seal uses this trait object for authoring blocks valid +/// Consensus data provider; manual seal uses this trait object for authoring blocks valid /// for any runtime. pub trait ConsensusDataProvider: Send + Sync { /// Block import transaction type type Transaction; /// Attempt to create a consensus digest. - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error>; + fn create_digest( + &self, + parent: &B::Header, + inherents: &InherentData, + ) -> Result, Error>; /// Set up the necessary import params.
fn append_block_import( &self, parent: &B::Header, params: &mut BlockImportParams, - inherents: &InherentData + inherents: &InherentData, ) -> Result<(), Error>; } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index fb2d47b48fed17a84869e586472bb10597c4b1b0..3773c7c3cf1212e63f1f2623b352b63d09aec542 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -21,30 +21,40 @@ use super::ConsensusDataProvider; use crate::Error; use codec::Encode; -use std::{borrow::Cow, sync::{Arc, atomic}, time::SystemTime}; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus_babe::{ - Config, Epoch, authorship, CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY, - find_pre_digest, + authorship, find_pre_digest, BabeIntermediate, CompatibleDigestItem, Config, Epoch, + INTERMEDIATE_KEY, +}; +use sc_consensus_epochs::{ + descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; -use sc_consensus_epochs::{SharedEpochChanges, descendent_query, ViableEpochDescriptor, EpochHeader}; use sp_keystore::SyncCryptoStorePtr; +use std::{ + borrow::Cow, + sync::{atomic, Arc}, + time::SystemTime, +}; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockImportParams, BlockOrigin, ForkChoiceStrategy}; -use sp_consensus_slots::Slot; +use sp_consensus::{ + import_queue::{CacheKeyId, Verifier}, + BlockImportParams, BlockOrigin, ForkChoiceStrategy, +}; use sp_consensus_babe::{ - BabeApi, inherents::BabeInherentData, ConsensusLog, BABE_ENGINE_ID, AuthorityId, - digests::{PreDigest, SecondaryPlainPreDigest, NextEpochDescriptor}, BabeAuthorityWeight, + digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, + inherents::BabeInherentData, + AuthorityId, BabeApi, BabeAuthorityWeight, ConsensusLog, BABE_ENGINE_ID, }; +use sp_consensus_slots::Slot; use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ - traits::{DigestItemFor, DigestFor, Block as BlockT, Zero, Header}, - generic::{Digest, BlockId}, Justifications, + generic::{BlockId, Digest}, + traits::{Block as BlockT, DigestFor, DigestItemFor, Header, Zero}, + Justifications, }; -use sp_timestamp::{InherentType, INHERENT_IDENTIFIER, TimestampInherentData}; -use sp_consensus::import_queue::{Verifier, CacheKeyId}; +use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER}; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. @@ -77,19 +87,16 @@ pub struct BabeVerifier { impl BabeVerifier { /// Create a new verifier. pub fn new(epoch_changes: SharedEpochChanges, client: Arc) -> BabeVerifier { - BabeVerifier { - epoch_changes, - client, - } + BabeVerifier { epoch_changes, client } } } /// The verifier for the manual seal engine; instantly finalizes.
#[async_trait::async_trait] impl Verifier for BabeVerifier - where - B: BlockT, - C: HeaderBackend + HeaderMetadata +where + B: BlockT, + C: HeaderBackend + HeaderMetadata, { async fn verify( &mut self, @@ -107,7 +114,9 @@ impl Verifier for BabeVerifier let pre_digest = find_pre_digest::(&header)?; let parent_hash = header.parent_hash(); - let parent = self.client.header(BlockId::Hash(*parent_hash)) + let parent = self + .client + .header(BlockId::Hash(*parent_hash)) .ok() .flatten() .ok_or_else(|| format!("header for block {} not found", parent_hash))?; @@ -134,14 +143,14 @@ impl Verifier for BabeVerifier } impl BabeConsensusDataProvider - where - B: BlockT, - C: AuxStore - + HeaderBackend - + ProvideRuntimeApi - + HeaderMetadata - + UsageProvider, - C::Api: BabeApi, +where + B: BlockT, + C: AuxStore + + HeaderBackend + + ProvideRuntimeApi + + HeaderMetadata + + UsageProvider, + C::Api: BabeApi, { pub fn new( client: Arc, @@ -155,13 +164,7 @@ impl BabeConsensusDataProvider let config = Config::get_or_compute(&*client)?; - Ok(Self { - config, - client, - keystore, - epoch_changes, - authorities, - }) + Ok(Self { config, client, keystore, epoch_changes, authorities }) } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { @@ -177,10 +180,7 @@ impl BabeConsensusDataProvider .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; let epoch = epoch_changes - .viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot), - ) + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .ok_or_else(|| { log::info!(target: "babe", "create_digest: no viable_epoch :("); sp_consensus::Error::InvalidAuthoritiesSet @@ -191,38 +191,37 @@ impl BabeConsensusDataProvider } impl ConsensusDataProvider for BabeConsensusDataProvider - where - B: BlockT, - C: AuxStore - + HeaderBackend - + HeaderMetadata - + UsageProvider - + ProvideRuntimeApi, - C::Api: BabeApi, +where + B: BlockT, + C: AuxStore + + HeaderBackend + + HeaderMetadata + + UsageProvider + + ProvideRuntimeApi, + C::Api: BabeApi, { type Transaction = TransactionFor; - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result, Error> { - let slot = inherents.babe_inherent_data()? + fn create_digest( + &self, + parent: &B::Header, + inherents: &InherentData, + ) -> Result, Error> { + let slot = inherents + .babe_inherent_data()? .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; let epoch = self.epoch(parent, slot)?; // this is a dev node environment, we should always be able to claim a slot. - let logs = if let Some((predigest, _)) = authorship::claim_slot( - slot, - &epoch, - &self.keystore, - ) { - vec![ - as CompatibleDigestItem>::babe_pre_digest(predigest), - ] + let logs = if let Some((predigest, _)) = + authorship::claim_slot(slot, &epoch, &self.keystore) + { + vec![ as CompatibleDigestItem>::babe_pre_digest(predigest)] } else { // well we couldn't claim a slot because this is an existing chain and we're not in the authorities. // we need to tell BabeBlockImport that the epoch has changed, and we put ourselves in the authorities. 
- let predigest = PreDigest::SecondaryPlain(SecondaryPlainPreDigest { - slot, - authority_index: 0_u32, - }); + let predigest = + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { slot, authority_index: 0_u32 }); let mut epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes @@ -232,12 +231,15 @@ impl ConsensusDataProvider for BabeConsensusDataProvider parent.number().clone(), slot, ) - .map_err(|e| Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)))? + .map_err(|e| { + Error::StringError(format!("failed to fetch epoch_descriptor: {}", e)) + })? .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; match epoch_descriptor { ViableEpochDescriptor::Signaled(identifier, _epoch_header) => { - let epoch_mut = epoch_changes.epoch_mut(&identifier) + let epoch_mut = epoch_changes + .epoch_mut(&identifier) .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet)?; // mutate the current epoch @@ -251,15 +253,13 @@ impl ConsensusDataProvider for BabeConsensusDataProvider vec![ DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()), - DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode()) + DigestItemFor::::Consensus(BABE_ENGINE_ID, next_epoch.encode()), ] }, ViableEpochDescriptor::UnimportedGenesis(_) => { // since this is the genesis, secondary predigest works for now. - vec![ - DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode()), - ] - } + vec![DigestItemFor::::PreRuntime(BABE_ENGINE_ID, predigest.encode())] + }, } }; @@ -270,9 +270,10 @@ impl ConsensusDataProvider for BabeConsensusDataProvider &self, parent: &B::Header, params: &mut BlockImportParams, - inherents: &InherentData + inherents: &InherentData, ) -> Result<(), Error> { - let slot = inherents.babe_inherent_data()? + let slot = inherents + .babe_inherent_data()? .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; let epoch_changes = self.epoch_changes.shared_data(); let mut epoch_descriptor = epoch_changes @@ -289,27 +290,27 @@ impl ConsensusDataProvider for BabeConsensusDataProvider // a quick check to see if we're in the authorities let epoch = self.epoch(parent, slot)?; let (authority, _) = self.authorities.first().expect("authorities is non-empty; qed"); - let has_authority = epoch.authorities.iter() - .find(|(id, _)| *id == *authority) - .is_some(); + let has_authority = epoch.authorities.iter().find(|(id, _)| *id == *authority).is_some(); if !has_authority { log::info!(target: "manual-seal", "authority not found"); - let timestamp = inherents.timestamp_inherent_data()? + let timestamp = inherents + .timestamp_inherent_data()? .ok_or_else(|| Error::StringError("No timestamp inherent data".into()))?; let slot = *timestamp / self.config.slot_duration; // manually hard code epoch descriptor epoch_descriptor = match epoch_descriptor { - ViableEpochDescriptor::Signaled(identifier, _header) => { + ViableEpochDescriptor::Signaled(identifier, _header) => ViableEpochDescriptor::Signaled( identifier, EpochHeader { start_slot: slot.into(), end_slot: (slot * self.config.epoch_length).into(), }, - ) - }, - _ => unreachable!("we're not in the authorities, so this isn't the genesis epoch; qed") + ), + _ => unreachable!( + "we're not in the authorities, so this isn't the genesis epoch; qed" + ), }; } @@ -326,16 +327,16 @@ impl ConsensusDataProvider for BabeConsensusDataProvider /// Mocks the timestamp inherent to always produce the timestamp for the next babe slot.
pub struct SlotTimestampProvider { time: atomic::AtomicU64, - slot_duration: u64 + slot_duration: u64, } impl SlotTimestampProvider { /// Create a new mocked time stamp provider. pub fn new(client: Arc) -> Result - where - B: BlockT, - C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, - C::Api: BabeApi, + where + B: BlockT, + C: AuxStore + HeaderBackend + ProvideRuntimeApi + UsageProvider, + C::Api: BabeApi, { let slot_duration = Config::get_or_compute(&*client)?.slot_duration; let info = client.info(); @@ -355,10 +356,7 @@ impl SlotTimestampProvider { .as_millis() as u64 }; - Ok(Self { - time: atomic::AtomicU64::new(time), - slot_duration, - }) + Ok(Self { time: atomic::AtomicU64::new(time), slot_duration }) } /// Get the current slot number @@ -369,12 +367,13 @@ impl SlotTimestampProvider { #[async_trait::async_trait] impl InherentDataProvider for SlotTimestampProvider { - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), sp_inherents::Error> { + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> Result<(), sp_inherents::Error> { // we update the time here. - let duration: InherentType = self.time.fetch_add( - self.slot_duration, - atomic::Ordering::SeqCst, - ).into(); + let duration: InherentType = + self.time.fetch_add(self.slot_duration, atomic::Ordering::SeqCst).into(); inherent_data.put_data(INHERENT_IDENTIFIER, &duration)?; Ok(()) } diff --git a/substrate/client/consensus/manual-seal/src/error.rs b/substrate/client/consensus/manual-seal/src/error.rs index 77140c835a3eea86c651f841de8b3c03ca25630a..cd7fc0ee73ce16011e1696310976a23e11762a67 100644 --- a/substrate/client/consensus/manual-seal/src/error.rs +++ b/substrate/client/consensus/manual-seal/src/error.rs @@ -19,10 +19,10 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. -use sp_consensus::{Error as ConsensusError, ImportResult}; +use futures::channel::{mpsc::SendError, oneshot}; use sp_blockchain::Error as BlockchainError; +use sp_consensus::{Error as ConsensusError, ImportResult}; use sp_inherents::Error as InherentsError; -use futures::channel::{oneshot, mpsc::SendError}; /// Error code for rpc mod codes { @@ -63,14 +63,14 @@ pub enum Error { #[display(fmt = "{}", _0)] #[from(ignore)] StringError(String), - ///send error + /// send error #[display(fmt = "Consensus process is terminating")] Canceled(oneshot::Canceled), - ///send error + /// send error #[display(fmt = "Consensus process is terminating")] SendError(SendError), /// Some other error. 
- #[display(fmt="Other error: {}", _0)] + #[display(fmt = "Other error: {}", _0)] Other(Box), } @@ -85,7 +85,7 @@ impl Error { InherentError(_) => codes::INHERENTS_ERROR, BlockchainError(_) => codes::BLOCKCHAIN_ERROR, SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, - _ => codes::UNKNOWN_ERROR + _ => codes::UNKNOWN_ERROR, } } } @@ -95,7 +95,7 @@ impl std::convert::From for jsonrpc_core::Error { jsonrpc_core::Error { code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), message: format!("{}", error), - data: None + data: None, } } } diff --git a/substrate/client/consensus/manual-seal/src/finalize_block.rs b/substrate/client/consensus/manual-seal/src/finalize_block.rs index 76ae6eeeae5aceebab426be0af8c7f66f13c3aa5..a5ddf1d162f7a4ab08d68654acf7216ed2c33ef8 100644 --- a/substrate/client/consensus/manual-seal/src/finalize_block.rs +++ b/substrate/client/consensus/manual-seal/src/finalize_block.rs @@ -19,14 +19,9 @@ //! Block finalization utilities use crate::rpc; -use sp_runtime::{ - Justification, - traits::Block as BlockT, - generic::BlockId, -}; -use std::sync::Arc; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use std::marker::PhantomData; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification}; +use std::{marker::PhantomData, sync::Arc}; /// params for block finalization. pub struct FinalizeBlockParams { @@ -42,30 +37,23 @@ pub struct FinalizeBlockParams { pub _phantom: PhantomData, } - /// finalizes a block in the backend with the given params. pub async fn finalize_block(params: FinalizeBlockParams) - where - B: BlockT, - F: Finalizer, - CB: ClientBackend, +where + B: BlockT, + F: Finalizer, + CB: ClientBackend, { - let FinalizeBlockParams { - hash, - mut sender, - justification, - finalizer, - .. - } = params; + let FinalizeBlockParams { hash, mut sender, justification, finalizer, .. } = params; match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { Err(e) => { log::warn!("Failed to finalize block {:?}", e); rpc::send_result(&mut sender, Err(e.into())) - } + }, Ok(()) => { log::info!("✅ Successfully finalized block: {}", hash); rpc::send_result(&mut sender, Ok(())) - } + }, } } diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index 5d93f6724ee9fcd3aa5a76eb6f533fb738a7671b..1aacd22aa7bb812326ec5c8356e22dee31bd31a3 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -20,17 +20,17 @@ //! This is suitable for a testing environment. 
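The whole module is driven by `EngineCommand` values (defined in `rpc.rs` below). As a rough sketch, sealing a single block by hand looks like the following, assuming a `run_manual_seal` task is consuming the other end of the channel as its `commands_stream`, and with `sp_core::H256` standing in for the runtime's block hash type:

```rust
use futures::{channel::{mpsc, oneshot}, SinkExt};
use sp_core::H256;

// Sketch only: the receiving half of `sink` must be wired into a running
// `run_manual_seal` task for these commands to have any effect.
async fn seal_one_block(mut sink: mpsc::Sender<EngineCommand<H256>>) {
	let (tx, rx) = oneshot::channel();
	sink.send(EngineCommand::SealNewBlock {
		create_empty: true, // author a block even if the transaction pool is empty
		finalize: false,    // finalization can be requested separately
		parent_hash: None,  // build on the current best block
		sender: Some(tx),
	})
	.await
	.expect("authorship task is alive");

	// The task reports back over the oneshot with the created block's hash and aux data.
	let created = rx.await.expect("task answers").expect("sealing succeeded");
	log::info!("sealed block {:?}", created.hash);
}
```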
use futures::prelude::*; +use prometheus_endpoint::Registry; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use sp_blockchain::HeaderBackend; use sp_consensus::{ - Environment, Proposer, SelectChain, BlockImport, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - import_queue::{Verifier, BasicQueue, CacheKeyId, BoxBlockImport}, + import_queue::{BasicQueue, BoxBlockImport, CacheKeyId, Verifier}, + BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer, + SelectChain, }; -use sp_blockchain::HeaderBackend; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::{traits::Block as BlockT, Justifications, ConsensusEngineId}; -use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use std::{sync::Arc, marker::PhantomData}; -use prometheus_endpoint::Registry; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId, Justifications}; +use std::{marker::PhantomData, sync::Arc}; mod error; mod finalize_block; @@ -40,14 +40,14 @@ pub mod consensus; pub mod rpc; pub use self::{ - error::Error, consensus::ConsensusDataProvider, + error::Error, finalize_block::{finalize_block, FinalizeBlockParams}, - seal_block::{SealBlockParams, seal_block, MAX_PROPOSAL_DURATION}, - rpc::{EngineCommand, CreatedBlock}, + rpc::{CreatedBlock, EngineCommand}, + seal_block::{seal_block, SealBlockParams, MAX_PROPOSAL_DURATION}, }; -use sp_api::{ProvideRuntimeApi, TransactionFor}; use sc_transaction_pool_api::TransactionPool; +use sp_api::{ProvideRuntimeApi, TransactionFor}; /// The `ConsensusEngineId` of Manual Seal. pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; @@ -80,17 +80,11 @@ pub fn import_queue( spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, ) -> BasicQueue - where - Block: BlockT, - Transaction: Send + Sync + 'static, +where + Block: BlockT, + Transaction: Send + Sync + 'static, { - BasicQueue::new( - ManualSealVerifier, - block_import, - None, - spawner, - registry, - ) + BasicQueue::new(ManualSealVerifier, block_import, None, spawner, registry) } /// Params required to start the instant sealing authorship task. @@ -115,7 +109,8 @@ pub struct ManualSealParams, TP, SC, C pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>>, + pub consensus_data_provider: + Option>>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, @@ -139,7 +134,8 @@ pub struct InstantSealParams, TP, SC, pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>>, + pub consensus_data_provider: + Option>>>, /// Something that can create the inherent data providers. 
pub create_inherent_data_providers: CIDP, @@ -156,58 +152,52 @@ pub async fn run_manual_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: ManualSealParams -) - where - B: BlockT + 'static, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Proposer: Proposer>, - CS: Stream::Hash>> + Unpin + 'static, - SC: SelectChain + 'static, - TransactionFor: 'static, - TP: TransactionPool, - CIDP: CreateInherentDataProviders, + }: ManualSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + CS: Stream::Hash>> + Unpin + 'static, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, { while let Some(command) = commands_stream.next().await { match command { - EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender, - } => { - seal_block( - SealBlockParams { - sender, - parent_hash, - finalize, - create_empty, - env: &mut env, - select_chain: &select_chain, - block_import: &mut block_import, - consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), - pool: pool.clone(), - client: client.clone(), - create_inherent_data_providers: &create_inherent_data_providers, - } - ).await; - } + EngineCommand::SealNewBlock { create_empty, finalize, parent_hash, sender } => { + seal_block(SealBlockParams { + sender, + parent_hash, + finalize, + create_empty, + env: &mut env, + select_chain: &select_chain, + block_import: &mut block_import, + consensus_data_provider: consensus_data_provider.as_ref().map(|p| &**p), + pool: pool.clone(), + client: client.clone(), + create_inherent_data_providers: &create_inherent_data_providers, + }) + .await; + }, EngineCommand::FinalizeBlock { hash, sender, justification } => { let justification = justification.map(|j| (MANUAL_SEAL_ENGINE_ID, j)); - finalize_block( - FinalizeBlockParams { - hash, - sender, - justification, - finalizer: client.clone(), - _phantom: PhantomData, - } - ).await - } + finalize_block(FinalizeBlockParams { + hash, + sender, + justification, + finalizer: client.clone(), + _phantom: PhantomData, + }) + .await + }, } } } @@ -224,63 +214,57 @@ pub async fn run_instant_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams -) - where - B: BlockT + 'static, - BI: BlockImport> - + Send + Sync + 'static, - C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Proposer: Proposer>, - SC: SelectChain + 'static, - TransactionFor: 'static, - TP: TransactionPool, - CIDP: CreateInherentDataProviders, + }: InstantSealParams, +) where + B: BlockT + 'static, + BI: BlockImport> + + Send + + Sync + + 'static, + C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Proposer: Proposer>, + SC: SelectChain + 'static, + TransactionFor: 'static, + TP: TransactionPool, + CIDP: CreateInherentDataProviders, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. 
- let commands_stream = pool.import_notification_stream() - .map(|_| { - EngineCommand::SealNewBlock { - create_empty: false, - finalize: false, - parent_hash: None, - sender: None, - } - }); - - run_manual_seal( - ManualSealParams { - block_import, - env, - client, - pool, - commands_stream, - select_chain, - consensus_data_provider, - create_inherent_data_providers, - } - ).await + let commands_stream = pool.import_notification_stream().map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }); + + run_manual_seal(ManualSealParams { + block_import, + env, + client, + pool, + commands_stream, + select_chain, + consensus_data_provider, + create_inherent_data_providers, + }) + .await } #[cfg(test)] mod tests { use super::*; - use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, - TestClientBuilderExt, - AccountKeyring::*, - TestClientBuilder, - }; - use sc_transaction_pool::{BasicPool, RevalidationType, Options}; - use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use sc_transaction_pool_api::{TransactionPool, MaintainedTransactionPool, TransactionSource}; - use sp_runtime::generic::BlockId; - use sp_consensus::ImportedAux; use sc_basic_authorship::ProposerFactory; use sc_client_api::BlockBackend; + use sc_transaction_pool::{BasicPool, Options, RevalidationType}; + use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use sp_consensus::ImportedAux; + use sp_runtime::generic::BlockId; + use substrate_test_runtime_client::{ + AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use substrate_test_runtime_transaction_pool::{uxt, TestApi}; fn api() -> Arc { Arc::new(TestApi::empty()) @@ -303,40 +287,32 @@ mod tests { spawner.clone(), 0, )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), - None, - None, - ); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as transactions are imported into the pool. let (sender, receiver) = futures::channel::oneshot::channel(); let mut sender = Arc::new(Some(sender)); - let commands_stream = pool.pool().validated_pool().import_notification_stream() - .map(move |_| { + let commands_stream = + pool.pool().validated_pool().import_notification_stream().map(move |_| { // we're only going to submit one tx so this fn will only be called once. 
- let mut_sender = Arc::get_mut(&mut sender).unwrap(); + let mut_sender = Arc::get_mut(&mut sender).unwrap(); let sender = std::mem::take(mut_sender); EngineCommand::SealNewBlock { create_empty: false, finalize: true, parent_hash: None, - sender + sender, } }); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.clone(), - commands_stream, - select_chain, - create_inherent_data_providers: |_, _| async { Ok(()) }, - consensus_data_provider: None, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + create_inherent_data_providers: |_, _| async { Ok(()) }, + consensus_data_provider: None, + }); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task @@ -380,27 +356,19 @@ mod tests { spawner.clone(), 0, )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), - None, - None, - ); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the stream. let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: None, - create_inherent_data_providers: |_, _| async { Ok(()) }, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task @@ -416,7 +384,9 @@ mod tests { sender: Some(tx), create_empty: false, finalize: false, - }).await.unwrap(); + }) + .await + .unwrap(); let created_block = rx.await.unwrap().unwrap(); // assert that the background task returns ok @@ -439,8 +409,10 @@ mod tests { sink.send(EngineCommand::FinalizeBlock { sender: Some(tx), hash: header.hash(), - justification: None - }).await.unwrap(); + justification: None, + }) + .await + .unwrap(); // assert that the background task returns ok assert_eq!(rx.await.unwrap().unwrap(), ()); } @@ -461,27 +433,19 @@ mod tests { spawner.clone(), 0, )); - let env = ProposerFactory::new( - spawner.clone(), - client.clone(), - pool.clone(), - None, - None, - ); + let env = ProposerFactory::new(spawner.clone(), client.clone(), pool.clone(), None, None); // this test checks that blocks are created as soon as an engine command is sent over the stream. 
let (mut sink, commands_stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - ManualSealParams { - block_import: client.clone(), - env, - client: client.clone(), - pool: pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: None, - create_inherent_data_providers: |_, _| async { Ok(()) }, - } - ); + let future = run_manual_seal(ManualSealParams { + block_import: client.clone(), + env, + client: client.clone(), + pool: pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: None, + create_inherent_data_providers: |_, _| async { Ok(()) }, + }); std::thread::spawn(|| { let mut rt = tokio::runtime::Runtime::new().unwrap(); // spawn the background authorship task @@ -498,7 +462,9 @@ mod tests { sender: Some(tx), create_empty: false, finalize: false, - }).await.unwrap(); + }) + .await + .unwrap(); let created_block = rx.await.unwrap().unwrap(); pool_api.increment_nonce(Alice.into()); @@ -524,31 +490,35 @@ mod tests { pool.maintain(sc_transaction_pool_api::ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None, - }).await; + }) + .await; let (tx1, rx1) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx1), - create_empty: false, - finalize: false, - }).await.is_ok()); - assert_matches::assert_matches!( - rx1.await.expect("should be no error receiving"), - Ok(_) - ); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx1), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); let block = client.block(&BlockId::Number(2)).unwrap().unwrap().block; pool_api.add_block(block, true); pool_api.increment_nonce(Alice.into()); assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx2), - create_empty: false, - finalize: false, - }).await.is_ok()); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx2), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); let imported = rx2.await.unwrap().unwrap(); // assert that fork block is in the db assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) diff --git a/substrate/client/consensus/manual-seal/src/rpc.rs b/substrate/client/consensus/manual-seal/src/rpc.rs index eb056f22fed8b2ef090838315b36bec38efc697f..0f686bc26e7df65af8b060824b075bc5572051f6 100644 --- a/substrate/client/consensus/manual-seal/src/rpc.rs +++ b/substrate/client/consensus/manual-seal/src/rpc.rs @@ -18,18 +18,16 @@ //! RPC interface for the `ManualSeal` Engine. 
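The pattern throughout this file is a simple round trip: each RPC method forwards an `EngineCommand` into the authorship task's channel and awaits the oneshot reply, while the `send_result` helper at the bottom of the file delivers the reply from the task side. A small sketch of that helper's two modes, assuming `Sender<T>` is the usual `Option` of a oneshot sender carrying `Result<T, Error>`:

```rust
// Sketch of `send_result` semantics (see the helper below). With an attached
// rpc sender the result is forwarded to the caller; with `None` (instant
// seal), it is only logged.
let (tx, rx) = futures::channel::oneshot::channel();
let mut with_caller: Sender<()> = Some(tx);
send_result(&mut with_caller, Ok(()));
// `rx` now resolves with `Ok(())` for the rpc caller.
let _ = rx;

let mut instant_seal: Sender<()> = None;
send_result(&mut instant_seal, Ok(())); // logs "Instant Seal success: ()"
```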
-use sp_consensus::ImportedAux; -use jsonrpc_core::Error; -use jsonrpc_derive::rpc; +pub use self::gen_client::Client as ManualSealClient; use futures::{ channel::{mpsc, oneshot}, - TryFutureExt, - FutureExt, - SinkExt + FutureExt, SinkExt, TryFutureExt, }; +use jsonrpc_core::Error; +use jsonrpc_derive::rpc; use serde::{Deserialize, Serialize}; +use sp_consensus::ImportedAux; use sp_runtime::EncodedJustification; -pub use self::gen_client::Client as ManualSealClient; /// Future's type for jsonrpc type FutureResult = Box + Send>; @@ -63,7 +61,7 @@ pub enum EngineCommand { sender: Sender<()>, /// finalization justification justification: Option, - } + }, } /// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. @@ -75,7 +73,7 @@ pub trait ManualSealApi { &self, create_empty: bool, finalize: bool, - parent_hash: Option + parent_hash: Option, ) -> FutureResult>; /// Instructs the manual-seal authorship task to finalize a block @@ -83,7 +81,7 @@ pub trait ManualSealApi { fn finalize_block( &self, hash: Hash, - justification: Option + justification: Option, ) -> FutureResult; } @@ -98,7 +96,7 @@ pub struct CreatedBlock { /// hash of the created block. pub hash: Hash, /// some extra details about the import operation - pub aux: ImportedAux + pub aux: ImportedAux, } impl ManualSeal { @@ -113,7 +111,7 @@ impl ManualSealApi for ManualSeal { &self, create_empty: bool, finalize: bool, - parent_hash: Option + parent_hash: Option, ) -> FutureResult> { let mut sink = self.import_block_channel.clone(); let future = async move { @@ -126,18 +124,22 @@ impl ManualSealApi for ManualSeal { }; sink.send(command).await?; receiver.await? - }.boxed(); + } + .boxed(); Box::new(future.map_err(Error::from).compat()) } - fn finalize_block(&self, hash: Hash, justification: Option) -> FutureResult { + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult { let mut sink = self.import_block_channel.clone(); let future = async move { let (sender, receiver) = oneshot::channel(); - sink.send( - EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification } - ).await?; + sink.send(EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }) + .await?; receiver.await?.map(|_| true) }; @@ -150,7 +152,7 @@ impl ManualSealApi for ManualSeal { /// to the rpc pub fn send_result( sender: &mut Sender, - result: std::result::Result + result: std::result::Result, ) { if let Some(sender) = sender.take() { if let Err(err) = sender.send(result) { @@ -160,7 +162,7 @@ pub fn send_result( // instant seal doesn't report errors over rpc, simply log them. match result { Ok(r) => log::info!("Instant Seal success: {:?}", r), - Err(e) => log::error!("Instant Seal encountered an error: {}", e) + Err(e) => log::error!("Instant Seal encountered an error: {}", e), } } } diff --git a/substrate/client/consensus/manual-seal/src/seal_block.rs b/substrate/client/consensus/manual-seal/src/seal_block.rs index 450a7bff4cd40cba59c5dc15b3d65272585d17ae..be97e0ccc360cb8b9da0971ceb0a4c2f9762f28f 100644 --- a/substrate/client/consensus/manual-seal/src/seal_block.rs +++ b/substrate/client/consensus/manual-seal/src/seal_block.rs @@ -18,23 +18,21 @@ //! 
Block sealing utilities -use crate::{Error, rpc, CreatedBlock, ConsensusDataProvider}; -use std::sync::Arc; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, - generic::BlockId, -}; +use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; use futures::prelude::*; +use sc_transaction_pool_api::TransactionPool; +use sp_api::{ProvideRuntimeApi, TransactionFor}; +use sp_blockchain::HeaderBackend; use sp_consensus::{ - self, BlockImport, Environment, Proposer, ForkChoiceStrategy, - BlockImportParams, BlockOrigin, ImportResult, SelectChain, StateAction, + self, BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, + ImportResult, Proposer, SelectChain, StateAction, }; -use sp_blockchain::HeaderBackend; -use std::collections::HashMap; -use std::time::Duration; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_api::{ProvideRuntimeApi, TransactionFor}; -use sc_transaction_pool_api::TransactionPool; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; +use std::{collections::HashMap, sync::Arc, time::Duration}; /// max duration for creating a proposal in secs pub const MAX_PROPOSAL_DURATION: u64 = 10; @@ -59,7 +57,8 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP /// SelectChain object pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>>, + pub consensus_data_provider: + Option<&'a dyn ConsensusDataProvider>>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. @@ -97,7 +96,7 @@ pub async fn seal_block( { let future = async { if pool.status().ready == 0 && !create_empty { - return Err(Error::EmptyTransactionPool); + return Err(Error::EmptyTransactionPool) } // get the header to build this new block on. @@ -129,12 +128,15 @@ pub async fn seal_block( Default::default() }; - let proposal = proposer.propose( - inherent_data.clone(), - digest, - Duration::from_secs(MAX_PROPOSAL_DURATION), - None, - ).map_err(|err| Error::StringError(format!("{:?}", err))).await?; + let proposal = proposer + .propose( + inherent_data.clone(), + digest, + Duration::from_secs(MAX_PROPOSAL_DURATION), + None, + ) + .map_err(|err| Error::StringError(format!("{:?}", err))) + .await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { return Err(Error::EmptyTransactionPool) @@ -145,18 +147,17 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - params.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(proposal.storage_changes) - ); + params.state_action = StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes( + proposal.storage_changes, + )); if let Some(digest_provider) = digest_provider { digest_provider.append_block_import(&parent, &mut params, &inherent_data)?; } match block_import.import_block(params, HashMap::new()).await? 
{ - ImportResult::Imported(aux) => { - Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) - }, + ImportResult::Imported(aux) => + Ok(CreatedBlock { hash: ::Header::hash(&header), aux }), other => Err(other.into()), } }; diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index e71726564ebe5f3d0b79fb7156e57da4e09807b7..7e5b5a59c91705961f91385c25535f2118fa4c3d 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -41,34 +41,33 @@ mod worker; -pub use crate::worker::{MiningWorker, MiningMetadata, MiningBuild}; +pub use crate::worker::{MiningBuild, MiningMetadata, MiningWorker}; -use std::{ - sync::Arc, borrow::Cow, collections::HashMap, marker::PhantomData, - cmp::Ordering, time::Duration, -}; +use codec::{Decode, Encode}; use futures::{Future, StreamExt}; +use log::*; use parking_lot::Mutex; -use sc_client_api::{BlockOf, backend::AuxStore, BlockchainEvents}; -use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{Justifications, RuntimeString}; -use sp_runtime::generic::{BlockId, Digest, DigestItem}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use prometheus_endpoint::Registry; +use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; +use sp_consensus::{ + import_queue::{BasicQueue, BoxBlockImport, BoxJustificationImport, Verifier}, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, + Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, SelectChain, SyncOracle, +}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_consensus::{ - BlockImportParams, BlockOrigin, ForkChoiceStrategy, SyncOracle, Environment, Proposer, - SelectChain, Error as ConsensusError, CanAuthorWith, BlockImport, BlockCheckParams, ImportResult, +use sp_runtime::{ + generic::{BlockId, Digest, DigestItem}, + traits::{Block as BlockT, Header as HeaderT}, + Justifications, RuntimeString, }; -use sp_consensus::import_queue::{ - BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, +use std::{ + borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, + time::Duration, }; -use codec::{Encode, Decode}; -use prometheus_endpoint::Registry; -use sc_client_api; -use log::*; use crate::worker::UntilImportedOrTimeout; @@ -102,7 +101,7 @@ pub enum Error { CheckInherents(sp_inherents::Error), #[display( fmt = "Checking inherents unknown error for identifier: {:?}", - "String::from_utf8_lossy(_0)", + "String::from_utf8_lossy(_0)" )] CheckInherentsUnknownError(sp_inherents::InherentIdentifier), #[display(fmt = "Multiple pre-runtime digests")] @@ -153,7 +152,8 @@ pub struct PowAux { pub total_difficulty: Difficulty, } -impl PowAux where +impl PowAux +where Difficulty: Decode + Default, { /// Read the auxiliary from client. @@ -193,11 +193,7 @@ pub trait PowAlgorithm { /// breaking algorithms will help to protect against selfish mining. /// /// Returns if the new seal should be considered best block. 
- fn break_tie( - &self, - _own_seal: &Seal, - _new_seal: &Seal, - ) -> bool { + fn break_tie(&self, _own_seal: &Seal, _new_seal: &Seal) -> bool { false } /// Verify that the difficulty is valid against given seal. @@ -238,7 +234,8 @@ impl Clone } } -impl PowBlockImport where +impl PowBlockImport +where B: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, @@ -289,14 +286,15 @@ impl PowBlockImport(block.post_digests.last(), block.header.hash())?; - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; let difficulty = match intermediate.difficulty { Some(difficulty) => difficulty, @@ -401,14 +398,12 @@ where Ordering::Less => false, Ordering::Greater => true, Ordering::Equal => { - let best_inner_seal = fetch_seal::( - best_header.digest().logs.last(), - best_hash, - )?; + let best_inner_seal = + fetch_seal::(best_header.digest().logs.last(), best_hash)?; self.algorithm.break_tie(&best_inner_seal, &inner_seal) }, - } + }, )); } @@ -423,35 +418,33 @@ pub struct PowVerifier { } impl PowVerifier { - pub fn new( - algorithm: Algorithm, - ) -> Self { + pub fn new(algorithm: Algorithm) -> Self { Self { algorithm, _marker: PhantomData } } fn check_header( &self, mut header: B::Header, - ) -> Result<(B::Header, DigestItem), Error> where + ) -> Result<(B::Header, DigestItem), Error> + where Algorithm: PowAlgorithm, { let hash = header.hash(); let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == POW_ENGINE_ID { (DigestItem::Seal(id, seal.clone()), seal) } else { return Err(Error::WrongEngine(id)) - } - }, + }, _ => return Err(Error::HeaderUnsealed(hash)), }; let pre_hash = header.hash(); if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify); + return Err(Error::FailedPreliminaryVerify) } Ok((header, seal)) @@ -459,7 +452,8 @@ impl PowVerifier { } #[async_trait::async_trait] -impl Verifier for PowVerifier where +impl Verifier for PowVerifier +where Algorithm: PowAlgorithm + Send + Sync, Algorithm::Difficulty: 'static + Send, { @@ -473,18 +467,15 @@ impl Verifier for PowVerifier where let hash = header.hash(); let (checked_header, seal) = self.check_header(header)?; - let intermediate = PowIntermediate:: { - difficulty: None, - }; + let intermediate = PowIntermediate:: { difficulty: None }; let mut import_block = BlockImportParams::new(origin, checked_header); import_block.post_digests.push(seal); import_block.body = body; import_block.justifications = justifications; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box<_> - ); + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); import_block.post_hash = Some(hash); Ok((import_block, None)) @@ -501,10 +492,8 @@ pub fn import_queue( algorithm: Algorithm, spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, -) -> Result< - PowImportQueue, - sp_consensus::Error -> where +) -> Result, sp_consensus::Error> +where B: BlockT, Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, @@ -512,13 +501,7 @@ pub fn import_queue( { let verifier = PowVerifier::new(algorithm); - Ok(BasicQueue::new( - verifier, - block_import, - justification_import, - spawner, - registry, - )) + Ok(BasicQueue::new(verifier, block_import, justification_import, spawner, 
registry)) } /// Start the mining worker for PoW. This function provides the necessary helper functions that can @@ -573,13 +556,13 @@ where let task = async move { loop { if timer.next().await.is_none() { - break; + break } if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); worker.lock().on_major_syncing(); - return; + return } let best_header = match select_chain.best_chain().await { @@ -591,8 +574,8 @@ where Select best chain error: {:?}", err ); - return; - } + return + }, }; let best_hash = best_header.hash(); @@ -603,11 +586,11 @@ where Probably a node update is required!", err, ); - return; + return } if worker.lock().best_hash() == Some(best_hash) { - return; + return } // The worker is locked for the duration of the whole proposing period. Within this period, @@ -622,23 +605,25 @@ where Fetch difficulty failed: {:?}", err, ); - return; + return }, }; - let inherent_data_providers = - match create_inherent_data_providers.create_inherent_data_providers(best_hash, ()).await { - Ok(x) => x, - Err(err) => { - warn!( - target: "pow", - "Unable to propose new block for authoring. \ - Creating inherent data providers failed: {:?}", - err, - ); - return; - }, - }; + let inherent_data_providers = match create_inherent_data_providers + .create_inherent_data_providers(best_hash, ()) + .await + { + Ok(x) => x, + Err(err) => { + warn!( + target: "pow", + "Unable to propose new block for authoring. \ + Creating inherent data providers failed: {:?}", + err, + ); + return + }, + }; let inherent_data = match inherent_data_providers.create_inherent_data() { Ok(r) => r, @@ -649,7 +634,7 @@ where Creating inherent data failed: {:?}", e, ); - return; + return }, }; @@ -673,12 +658,10 @@ where }, }; - let proposal = match proposer.propose( - inherent_data, - inherent_digest, - build_time.clone(), - None, - ).await { + let proposal = match proposer + .propose(inherent_data, inherent_digest, build_time.clone(), None) + .await + { Ok(x) => x, Err(err) => { warn!( @@ -714,9 +697,8 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err for log in header.digest().logs() { trace!(target: "pow", "Checking log {:?}, looking for pre runtime digest", log); match (log, pre_digest.is_some()) { - (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => { - return Err(Error::MultiplePreRuntimeDigests) - }, + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => + return Err(Error::MultiplePreRuntimeDigests), (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { pre_digest = Some(v.clone()); }, @@ -733,13 +715,12 @@ fn fetch_seal( hash: B::Hash, ) -> Result, Error> { match digest { - Some(DigestItem::Seal(id, seal)) => { + Some(DigestItem::Seal(id, seal)) => if id == &POW_ENGINE_ID { Ok(seal.clone()) } else { return Err(Error::::WrongEngine(*id).into()) - } - }, + }, _ => return Err(Error::::HeaderUnsealed(hash).into()), } } diff --git a/substrate/client/consensus/pow/src/worker.rs b/substrate/client/consensus/pow/src/worker.rs index 74fbcce81341dbeff5f64cf3e165dd208991019e..572ed364c8f82caf3e649105e82728f57dad11c9 100644 --- a/substrate/client/consensus/pow/src/worker.rs +++ b/substrate/client/consensus/pow/src/worker.rs @@ -16,20 +16,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
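For orientation before the worker internals, a minimal sketch of the other side of this interface: a `PowAlgorithm` implementation of the kind the import queue and mining worker above are generic over. Only `break_tie` and `preliminary_verify` appear with full signatures in this diff, so the `difficulty`/`verify` signatures and the bounds satisfied by `U256` are assumptions based on the upstream sc-consensus-pow API of this period; `PowAlgorithm` and `Error` are assumed in scope.

use sp_consensus_pow::Seal;
use sp_core::U256;
use sp_runtime::{generic::BlockId, traits::Block as BlockT};

#[derive(Clone)]
pub struct MinimalPow;

impl<B: BlockT> PowAlgorithm<B> for MinimalPow {
	type Difficulty = U256;

	fn difficulty(&self, _parent: B::Hash) -> Result<Self::Difficulty, Error<B>> {
		// Constant difficulty: every block is as hard to mine as any other.
		Ok(U256::from(1_000))
	}

	fn verify(
		&self,
		_parent: &BlockId<B>,
		_pre_hash: &B::Hash,
		_pre_digest: Option<&[u8]>,
		seal: &Seal,
		_difficulty: Self::Difficulty,
	) -> Result<bool, Error<B>> {
		// A real algorithm would hash `pre_hash` together with the nonce carried in
		// `seal` and compare the result against `difficulty`; this sketch only
		// rejects obviously malformed (empty) seals.
		Ok(!seal.is_empty())
	}

	// `preliminary_verify` and `break_tie` keep the defaults shown above: no cheap
	// pre-check, and never preferring a competing seal over our own on a tie.
}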
-use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; +use futures_timer::Delay; +use log::*; use sc_client_api::ImportNotifications; -use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, StorageChanges, - StateAction, import_queue::BoxBlockImport}; +use sp_consensus::{ + import_queue::BoxBlockImport, BlockImportParams, BlockOrigin, Proposal, StateAction, + StorageChanges, +}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, DigestItem, }; -use futures::{prelude::*, task::{Context, Poll}}; -use futures_timer::Delay; -use log::*; +use std::{borrow::Cow, collections::HashMap, pin::Pin, time::Duration}; -use crate::{INTERMEDIATE_KEY, POW_ENGINE_ID, Seal, PowAlgorithm, PowIntermediate}; +use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, POW_ENGINE_ID}; /// Mining metadata. This is the information needed to start an actual mining loop. #[derive(Clone, Eq, PartialEq)] @@ -49,7 +54,7 @@ pub struct MiningBuild< Block: BlockT, Algorithm: PowAlgorithm, C: sp_api::ProvideRuntimeApi, - Proof + Proof, > { /// Mining metadata. pub metadata: MiningMetadata, @@ -90,10 +95,7 @@ where self.build = None; } - pub(crate) fn on_build( - &mut self, - build: MiningBuild, - ) { + pub(crate) fn on_build(&mut self, build: MiningBuild) { self.build = Some(build); } @@ -137,23 +139,25 @@ where let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(seal); import_block.body = Some(body); - import_block.state_action = StateAction::ApplyChanges( - StorageChanges::Changes(build.proposal.storage_changes) - ); + import_block.state_action = + StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); let intermediate = PowIntermediate:: { difficulty: Some(build.metadata.difficulty), }; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box<_>, - ); + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); let header = import_block.post_header(); match self.block_import.import_block(import_block, HashMap::default()).await { Ok(res) => { - res.handle_justification(&header.hash(), *header.number(), &mut self.justification_sync_link); + res.handle_justification( + &header.hash(), + *header.number(), + &mut self.justification_sync_link, + ); info!( target: "pow", @@ -190,15 +194,8 @@ pub struct UntilImportedOrTimeout { impl UntilImportedOrTimeout { /// Create a new stream using the given import notification and timeout duration. - pub fn new( - import_notifications: ImportNotifications, - timeout: Duration, - ) -> Self { - Self { - import_notifications, - timeout, - inner_delay: None, - } + pub fn new(import_notifications: ImportNotifications, timeout: Duration) -> Self { + Self { import_notifications, timeout, inner_delay: None } } } diff --git a/substrate/client/consensus/slots/src/aux_schema.rs b/substrate/client/consensus/slots/src/aux_schema.rs index db94ec48855e407ea8141bc9bdbb08468762e04b..af92a3a0d60f96e7e05a45462a33ed78a658a60f 100644 --- a/substrate/client/consensus/slots/src/aux_schema.rs +++ b/substrate/client/consensus/slots/src/aux_schema.rs @@ -18,9 +18,9 @@ //! Schema for slots in the aux-db. 
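The `UntilImportedOrTimeout` stream reformatted above is what paces the mining loop; a sketch of its typical construction follows. The `import_notification_stream` call is the `sc_client_api::BlockchainEvents` method; its use here is an assumption, since no call site appears in this diff.

use sc_client_api::BlockchainEvents;
use sp_runtime::traits::Block as BlockT;
use std::time::Duration;

fn mining_timer<B, C>(client: &C) -> UntilImportedOrTimeout<B>
where
	B: BlockT,
	C: BlockchainEvents<B>,
{
	// Wake the mining loop on every block import, and at least every ten seconds
	// even when nothing is imported, so stale proposals get rebuilt.
	UntilImportedOrTimeout::new(client.import_notification_stream(), Duration::from_secs(10))
}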
-use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus_slots::{EquivocationProof, Slot}; use sp_runtime::traits::Header; @@ -33,17 +33,17 @@ pub const MAX_SLOT_CAPACITY: u64 = 1000; pub const PRUNING_BOUND: u64 = 2 * MAX_SLOT_CAPACITY; fn load_decode(backend: &C, key: &[u8]) -> ClientResult> - where - C: AuxStore, - T: Decode, +where + C: AuxStore, + T: Decode, { match backend.get_aux(key)? { None => Ok(None), Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e)), - ) - .map(Some) + .map_err(|e| { + ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e)) + }) + .map(Some), } } @@ -57,14 +57,14 @@ pub fn check_equivocation( header: &H, signer: &P, ) -> ClientResult>> - where - H: Header, - C: AuxStore, - P: Clone + Encode + Decode + PartialEq, +where + H: Header, + C: AuxStore, + P: Clone + Encode + Decode + PartialEq, { // We don't check equivocations for old headers out of our capacity. if slot_now.saturating_sub(*slot) > Slot::from(MAX_SLOT_CAPACITY) { - return Ok(None); + return Ok(None) } // Key for this slot. @@ -72,17 +72,16 @@ pub fn check_equivocation( slot.using_encoded(|s| curr_slot_key.extend(s)); // Get headers of this slot. - let mut headers_with_sig = load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])? - .unwrap_or_else(Vec::new); + let mut headers_with_sig = + load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])?.unwrap_or_else(Vec::new); // Get first slot saved. let slot_header_start = SLOT_HEADER_START.to_vec(); - let first_saved_slot = load_decode::<_, Slot>(backend, &slot_header_start[..])? - .unwrap_or(slot); + let first_saved_slot = load_decode::<_, Slot>(backend, &slot_header_start[..])?.unwrap_or(slot); if slot_now < first_saved_slot { // The code below assumes that slots will be visited sequentially. - return Ok(None); + return Ok(None) } for (prev_header, prev_signer) in headers_with_sig.iter() { @@ -96,7 +95,7 @@ pub fn check_equivocation( offender: signer.clone(), first_header: prev_header.clone(), second_header: header.clone(), - })); + })) } else { // We don't need to continue in case of duplicated header, // since it's already saved and a possible equivocation @@ -135,12 +134,11 @@ pub fn check_equivocation( #[cfg(test)] mod test { - use sp_core::{sr25519, Pair}; - use sp_core::hash::H256; - use sp_runtime::testing::{Header as HeaderTest, Digest as DigestTest}; + use sp_core::{hash::H256, sr25519, Pair}; + use sp_runtime::testing::{Digest as DigestTest, Header as HeaderTest}; use substrate_test_runtime_client; - use super::{MAX_SLOT_CAPACITY, PRUNING_BOUND, check_equivocation}; + use super::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; fn create_header(number: u64) -> HeaderTest { // so that different headers for the same number get different hashes @@ -151,7 +149,7 @@ mod test { number, state_root: Default::default(), extrinsics_root: Default::default(), - digest: DigestTest { logs: vec![], }, + digest: DigestTest { logs: vec![] }, }; header @@ -171,79 +169,55 @@ mod test { let header6 = create_header(3); // @ slot 4 // It's ok to sign same headers. 
- assert!( - check_equivocation( - &client, - 2.into(), - 2.into(), - &header1, - &public, - ).unwrap().is_none(), - ); - - assert!( - check_equivocation( - &client, - 3.into(), - 2.into(), - &header1, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation(&client, 2.into(), 2.into(), &header1, &public,) + .unwrap() + .is_none(),); + + assert!(check_equivocation(&client, 3.into(), 2.into(), &header1, &public,) + .unwrap() + .is_none(),); // But not two different headers at the same slot. - assert!( - check_equivocation( - &client, - 4.into(), - 2.into(), - &header2, - &public, - ).unwrap().is_some(), - ); + assert!(check_equivocation(&client, 4.into(), 2.into(), &header2, &public,) + .unwrap() + .is_some(),); // Different slot is ok. - assert!( - check_equivocation( - &client, - 5.into(), - 4.into(), - &header3, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation(&client, 5.into(), 4.into(), &header3, &public,) + .unwrap() + .is_none(),); // Here we trigger pruning and save header 4. - assert!( - check_equivocation( - &client, - (PRUNING_BOUND + 2).into(), - (MAX_SLOT_CAPACITY + 4).into(), - &header4, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 2).into(), + (MAX_SLOT_CAPACITY + 4).into(), + &header4, + &public, + ) + .unwrap() + .is_none(),); // This fails because header 5 is an equivocation of header 4. - assert!( - check_equivocation( - &client, - (PRUNING_BOUND + 3).into(), - (MAX_SLOT_CAPACITY + 4).into(), - &header5, - &public, - ).unwrap().is_some(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 3).into(), + (MAX_SLOT_CAPACITY + 4).into(), + &header5, + &public, + ) + .unwrap() + .is_some(),); // This is ok because we pruned the corresponding header. Shows that we are pruning. 
- assert!( - check_equivocation( - &client, - (PRUNING_BOUND + 4).into(), - 4.into(), - &header6, - &public, - ).unwrap().is_none(), - ); + assert!(check_equivocation( + &client, + (PRUNING_BOUND + 4).into(), + 4.into(), + &header6, + &public, + ) + .unwrap() + .is_none(),); } } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index c410f173e90ab2c9f7fe3a87d01ab502713ca6fc..b9b337c7edef9dc8784a86f7389e23c25406832d 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -25,19 +25,19 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] -mod slots; mod aux_schema; +mod slots; +pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; pub use slots::SlotInfo; use slots::Slots; -pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -use std::{fmt::Debug, ops::Deref, time::Duration}; use codec::{Decode, Encode}; use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; use log::{debug, error, info, warn}; -use sp_api::{ProvideRuntimeApi, ApiRef}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{ BlockImport, CanAuthorWith, JustificationSyncLink, Proposer, SelectChain, SlotData, SyncOracle, @@ -46,10 +46,10 @@ use sp_consensus_slots::Slot; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, HashFor, NumberFor} + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor}, }; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; use sp_timestamp::Timestamp; +use std::{fmt::Debug, ops::Deref, time::Duration}; /// The changes that need to be applied to the storage to create the state for a block. /// @@ -76,10 +76,7 @@ pub trait SlotWorker { /// /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in /// the slot. Otherwise `None` is returned. - async fn on_slot( - &mut self, - slot_info: SlotInfo, - ) -> Option>; + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option>; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at @@ -89,7 +86,8 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. type BlockImport: BlockImport>::Transaction> - + Send + 'static; + + Send + + 'static; /// A handle to a `SyncOracle`. type SyncOracle: SyncOracle; @@ -100,7 +98,9 @@ pub trait SimpleSlotWorker { /// The type of future resolving to the proposer. type CreateProposer: Future> - + Send + Unpin + 'static; + + Send + + Unpin + + 'static; /// The type of proposer to use to build blocks. type Proposer: Proposer + Send; @@ -139,12 +139,7 @@ pub trait SimpleSlotWorker { /// Notifies the given slot. Similar to `claim_slot`, but will be called no matter whether we /// need to author blocks or not. - fn notify_slot( - &self, - _header: &B::Header, - _slot: Slot, - _epoch_data: &Self::EpochData, - ) {} + fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} /// Return the pre digest data to include in a block authored with the given claim. fn pre_digest_data( &self, ) -> Vec>; /// Returns a function which produces a `BlockImportParams`. 
- fn block_import_params(&self) -> Box< + fn block_import_params( + &self, + ) -> Box< dyn Fn( - B::Header, - &B::Hash, - Vec, - StorageChanges<>::Transaction, B>, - Self::Claim, - Self::EpochData, - ) -> Result< - sp_consensus::BlockImportParams>::Transaction>, - sp_consensus::Error - > + Send + 'static + B::Header, + &B::Hash, + Vec, + StorageChanges<>::Transaction, B>, + Self::Claim, + Self::EpochData, + ) -> Result< + sp_consensus::BlockImportParams< + B, + >::Transaction, + >, + sp_consensus::Error, + > + Send + + 'static, >; /// Whether to force authoring if offline. @@ -194,10 +195,7 @@ pub trait SimpleSlotWorker { fn telemetry(&self) -> Option; /// Remaining duration for proposing. - fn proposing_remaining_duration( - &self, - slot_info: &SlotInfo, - ) -> Duration; + fn proposing_remaining_duration(&self, slot_info: &SlotInfo) -> Duration; /// Implements [`SlotWorker::on_slot`]. async fn on_slot( @@ -213,8 +211,7 @@ pub trait SimpleSlotWorker { let proposing_remaining = if proposing_remaining_duration == Duration::default() { debug!( target: logging_target, - "Skipping proposal slot {} since there's no time left to propose", - slot, + "Skipping proposal slot {} since there's no time left to propose", slot, ); return None @@ -240,8 +237,8 @@ pub trait SimpleSlotWorker { "err" => ?err, ); - return None; - } + return None + }, }; self.notify_slot(&slot_info.chain_head, slot, &epoch_data); @@ -260,13 +257,13 @@ pub trait SimpleSlotWorker { "authorities_len" => authorities_len, ); - return None; + return None } let claim = self.claim_slot(&slot_info.chain_head, slot, &epoch_data)?; if self.should_backoff(slot, &slot_info.chain_head) { - return None; + return None } debug!( @@ -289,9 +286,7 @@ pub trait SimpleSlotWorker { Err(err) => { warn!( target: logging_target, - "Unable to author block in slot {:?}: {:?}", - slot, - err, + "Unable to author block in slot {:?}: {:?}", slot, err, ); telemetry!( @@ -303,7 +298,7 @@ pub trait SimpleSlotWorker { ); return None - } + }, }; let logs = self.pre_digest_data(slot, &claim); @@ -311,34 +306,29 @@ pub trait SimpleSlotWorker { // deadline our production to 98% of the total time left for proposing. As we deadline // the proposing below to the same total time left, the 2% margin should be enough for // the result to be returned. - let proposing = proposer.propose( - slot_info.inherent_data, - sp_runtime::generic::Digest { - logs, - }, - proposing_remaining_duration.mul_f32(0.98), - None, - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); + let proposing = proposer + .propose( + slot_info.inherent_data, + sp_runtime::generic::Digest { logs }, + proposing_remaining_duration.mul_f32(0.98), + None, + ) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); let proposal = match futures::future::select(proposing, proposing_remaining).await { Either::Left((Ok(p), _)) => p, Either::Left((Err(err), _)) => { - warn!( - target: logging_target, - "Proposing failed: {:?}", - err, - ); + warn!(target: logging_target, "Proposing failed: {:?}", err,); return None }, Either::Right(_) => { info!( target: logging_target, - "⌛️ Discarding proposal for slot {}; block production took too long", - slot, + "⌛️ Discarding proposal for slot {}; block production took too long", slot, ); // If the node was compiled with debug, tell the user to use release optimizations. 
- #[cfg(build_type="debug")] + #[cfg(build_type = "debug")] info!( target: logging_target, "👉 Recompile your node in `--release` mode to mitigate this problem.", @@ -373,14 +363,10 @@ pub trait SimpleSlotWorker { ) { Ok(bi) => bi, Err(err) => { - warn!( - target: logging_target, - "Failed to create block import params: {:?}", - err, - ); + warn!(target: logging_target, "Failed to create block import params: {:?}", err,); return None - } + }, }; info!( @@ -401,17 +387,14 @@ pub trait SimpleSlotWorker { ); let header = block_import_params.post_header(); - match block_import - .import_block(block_import_params, Default::default()) - .await - { + match block_import.import_block(block_import_params, Default::default()).await { Ok(res) => { res.handle_justification( &header.hash(), *header.number(), self.justification_sync_link(), ); - } + }, Err(err) => { warn!( target: logging_target, @@ -425,18 +408,17 @@ pub trait SimpleSlotWorker { "hash" => ?parent_hash, "err" => ?err, ); - } + }, } - Some(SlotResult { - block: B::new(header, body), - storage_proof, - }) + Some(SlotResult { block: B::new(header, body), storage_proof }) } } #[async_trait::async_trait] -impl + Send> SlotWorker>::Proof> for T { +impl + Send> SlotWorker>::Proof> + for T +{ async fn on_slot( &mut self, slot_info: SlotInfo, @@ -496,8 +478,7 @@ pub async fn start_slot_worker( mut sync_oracle: SO, create_inherent_data_providers: CIDP, can_author_with: CAW, -) -where +) where B: BlockT, C: SelectChain, W: SlotWorker, @@ -509,28 +490,25 @@ where { let SlotDuration(slot_duration) = slot_duration; - let mut slots = Slots::new( - slot_duration.slot_duration(), - create_inherent_data_providers, - client, - ); + let mut slots = + Slots::new(slot_duration.slot_duration(), create_inherent_data_providers, client); loop { let slot_info = match slots.next_slot().await { Ok(r) => r, Err(e) => { warn!(target: "slots", "Error while polling for next slot: {:?}", e); - return; - } + return + }, }; if sync_oracle.is_major_syncing() { debug!(target: "slots", "Skipping proposal slot due to sync."); - continue; + continue } - if let Err(err) = can_author_with - .can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) + if let Err(err) = + can_author_with.can_author_with(&BlockId::Hash(slot_info.chain_head.hash())) { warn!( target: "slots", @@ -559,7 +537,10 @@ pub enum CheckedHeader { #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] -pub enum Error where T: Debug { +pub enum Error +where + T: Debug, +{ #[error("Slot duration is invalid: {0:?}")] SlotDurationInvalid(SlotDuration), } @@ -591,25 +572,23 @@ impl SlotDuration { /// /// `slot_key` is marked as `'static`, as it should really be a /// compile-time constant. - pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result where + pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result + where C: sc_client_api::backend::AuxStore + sc_client_api::UsageProvider, C: ProvideRuntimeApi, CB: FnOnce(ApiRef, &BlockId) -> sp_blockchain::Result, T: SlotData + Encode + Decode + Debug, { let slot_duration = match client.get_aux(T::SLOT_KEY)? 
{ - Some(v) => ::decode(&mut &v[..]) - .map(SlotDuration) - .map_err(|_| { - sp_blockchain::Error::Backend({ - error!(target: "slots", "slot duration kept in invalid format"); - "slot duration kept in invalid format".to_string() - }) - }), + Some(v) => ::decode(&mut &v[..]).map(SlotDuration).map_err(|_| { + sp_blockchain::Error::Backend({ + error!(target: "slots", "slot duration kept in invalid format"); + "slot duration kept in invalid format".to_string() + }) + }), None => { let best_hash = client.usage_info().chain.best_hash; - let slot_duration = - cb(client.runtime_api(), &BlockId::hash(best_hash))?; + let slot_duration = cb(client.runtime_api(), &BlockId::hash(best_hash))?; info!( "⏱ Loaded block-time = {:?} from block {:?}", @@ -621,11 +600,13 @@ impl SlotDuration { .using_encoded(|s| client.insert_aux(&[(T::SLOT_KEY, &s[..])], &[]))?; Ok(SlotDuration(slot_duration)) - } + }, }?; if slot_duration.slot_duration() == Default::default() { - return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid(slot_duration)))) + return Err(sp_blockchain::Error::Application(Box::new(Error::SlotDurationInvalid( + slot_duration, + )))) } Ok(slot_duration) @@ -687,9 +668,7 @@ pub fn proposing_remaining_duration( ) -> Duration { use sp_runtime::traits::Zero; - let proposing_duration = slot_info - .duration - .mul_f32(block_proposal_slot_portion.get()); + let proposing_duration = slot_info.duration.mul_f32(block_proposal_slot_portion.get()); let slot_remaining = slot_info .ends_at @@ -700,7 +679,7 @@ pub fn proposing_remaining_duration( // If parent is genesis block, we don't require any lenience factor. if slot_info.chain_head.number().is_zero() { - return proposing_duration; + return proposing_duration } let parent_slot = match parent_slot { @@ -723,9 +702,7 @@ pub fn proposing_remaining_duration( if let Some(ref max_block_proposal_slot_portion) = max_block_proposal_slot_portion { std::cmp::min( lenient_proposing_duration, - slot_info - .duration - .mul_f32(max_block_proposal_slot_portion.get()), + slot_info.duration.mul_f32(max_block_proposal_slot_portion.get()), ) } else { lenient_proposing_duration @@ -853,7 +830,7 @@ impl Default for BackoffAuthoringOnFinalizedHeadLagging { impl BackoffAuthoringBlocksStrategy for BackoffAuthoringOnFinalizedHeadLagging where - N: BaseArithmetic + Copy + N: BaseArithmetic + Copy, { fn should_backoff( &self, @@ -865,12 +842,12 @@ where ) -> bool { // This should not happen, but we want to keep the previous behaviour if it does. if slot_now <= chain_head_slot { - return false; + return false } let unfinalized_block_length = chain_head_number - finalized_number; - let interval = unfinalized_block_length.saturating_sub(self.unfinalized_slack) - / self.authoring_bias; + let interval = + unfinalized_block_length.saturating_sub(self.unfinalized_slack) / self.authoring_bias; let interval = interval.min(self.max_interval); // We're doing arithmetic between block and slot numbers. 
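A worked instance of the backoff arithmetic above. The final comparison falls outside the hunk, so the last assertion reconstructs it from the upstream default implementation and should be read as an assumption: authoring backs off while the current slot is within `interval` slots of the chain head's slot.

fn backoff_arithmetic_example() {
	let (unfinalized_slack, authoring_bias, max_interval) = (5u64, 2u64, 100u64);
	let (chain_head_number, finalized_number) = (29u64, 10u64);
	let (chain_head_slot, slot_now) = (90u64, 96u64);

	// 19 unfinalized blocks, of which 5 are tolerated slack, scaled down by the bias:
	// (19 - 5) / 2 = 7 slots of enforced gap, capped at `max_interval`.
	let unfinalized = chain_head_number - finalized_number;
	let interval =
		(unfinalized.saturating_sub(unfinalized_slack) / authoring_bias).min(max_interval);
	assert_eq!(interval, 7);

	// Slot 96 is only 6 slots past the head's slot 90, so authoring backs off;
	// from slot 98 (> 90 + 7) onwards it resumes.
	assert!(slot_now <= chain_head_slot + interval);
}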
@@ -906,9 +883,9 @@ impl BackoffAuthoringBlocksStrategy for () { #[cfg(test)] mod test { use super::*; + use sp_api::NumberFor; use std::time::{Duration, Instant}; use substrate_test_runtime_client::runtime::{Block, Header}; - use sp_api::NumberFor; const SLOT_DURATION: Duration = Duration::from_millis(6000); @@ -945,10 +922,7 @@ mod test { } // but we cap it to a maximum of 20 slots - assert_eq!( - super::slot_lenience_linear(1u64.into(), &slot(23)), - Some(SLOT_DURATION * 20), - ); + assert_eq!(super::slot_lenience_linear(1u64.into(), &slot(23)), Some(SLOT_DURATION * 20),); } #[test] @@ -1041,7 +1015,15 @@ mod test { let slot_now = 2; let should_backoff: Vec = (slot_now..1000) - .map(|s| strategy.should_backoff(head_number, head_slot.into(), finalized_number, s.into(), "slots")) + .map(|s| { + strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ) + }) .collect(); // Should always be false, since the head isn't advancing @@ -1105,7 +1087,15 @@ mod test { let max_interval = strategy.max_interval; let should_backoff: Vec = (slot_now..200) - .map(|s| strategy.should_backoff(head_number, head_slot.into(), finalized_number, s.into(), "slots")) + .map(|s| { + strategy.should_backoff( + head_number, + head_slot.into(), + finalized_number, + s.into(), + "slots", + ) + }) .collect(); // Should backoff (true) until we are `max_interval` number of slots ahead of the chain @@ -1123,11 +1113,7 @@ mod test { }; let finalized_number = 2; - let mut head_state = HeadState { - head_number: 4, - head_slot: 10, - slot_now: 11, - }; + let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: 11 }; let should_backoff = |head_state: &HeadState| -> bool { >>::should_backoff( @@ -1155,32 +1141,27 @@ mod test { // Gradually start to backoff more and more frequently let expected = [ false, false, false, false, false, // no effect - true, false, - true, false, // 1:1 - true, true, false, - true, true, false, // 2:1 - true, true, true, false, - true, true, true, false, // 3:1 - true, true, true, true, false, - true, true, true, true, false, // 4:1 - true, true, true, true, true, false, - true, true, true, true, true, false, // 5:1 - true, true, true, true, true, true, false, - true, true, true, true, true, true, false, // 6:1 - true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, false, // 7:1 - true, true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, true, false, // 8:1 - true, true, true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, true, true, false, // 9:1 - true, true, true, true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, true, true, true, false, // 10:1 - true, true, true, true, true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, true, true, true, true, false, // 11:1 - true, true, true, true, true, true, true, true, true, true, true, true, false, - true, true, true, true, true, true, true, true, true, true, true, true, false, // 12:1 + true, false, true, false, // 1:1 + true, true, false, true, true, false, // 2:1 + true, true, true, false, true, true, true, false, // 3:1 + true, true, true, true, false, true, true, true, true, false, // 4:1 + true, true, true, true, true, false, true, true, true, true, true, false, // 5:1 + true, true, true, true, true, true, false, true, true, true, true, true, true, + false, // 6:1 + true, true, 
true, true, true, true, true, false, true, true, true, true, true, true, + true, false, // 7:1 + true, true, true, true, true, true, true, true, false, true, true, true, true, true, + true, true, true, false, // 8:1 + true, true, true, true, true, true, true, true, true, false, true, true, true, true, + true, true, true, true, true, false, // 9:1 + true, true, true, true, true, true, true, true, true, true, false, true, true, true, + true, true, true, true, true, true, true, false, // 10:1 + true, true, true, true, true, true, true, true, true, true, true, false, true, true, + true, true, true, true, true, true, true, true, true, false, // 11:1 + true, true, true, true, true, true, true, true, true, true, true, true, false, true, + true, true, true, true, true, true, true, true, true, true, true, false, // 12:1 true, true, true, true, - ]; + ]; assert_eq!(backoff.as_slice(), &expected[..]); } @@ -1195,11 +1176,7 @@ mod test { let finalized_number = 2; let starting_slot = 11; - let mut head_state = HeadState { - head_number: 4, - head_slot: 10, - slot_now: starting_slot, - }; + let mut head_state = HeadState { head_number: 4, head_slot: 10, slot_now: starting_slot }; let should_backoff = |head_state: &HeadState| -> bool { >>::should_backoff( @@ -1240,30 +1217,22 @@ mod test { assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92); assert_eq!(last_slot - last_two_claimed.next().unwrap(), 92 + expected_distance); - let intervals: Vec<_> = slots_claimed - .windows(2) - .map(|x| x[1] - x[0]) - .collect(); + let intervals: Vec<_> = slots_claimed.windows(2).map(|x| x[1] - x[0]).collect(); // The key thing is that the distance between claimed slots is capped to `max_interval + 1` // assert_eq!(max_observed_interval, Some(&expected_distance)); assert_eq!(intervals.iter().max(), Some(&expected_distance)); // But lets assert all distances, which we expect to grow linearly until `max_interval + 1` - let expected_intervals: Vec<_> = (0..497) - .map(|i| (i/2).max(1).min(expected_distance) ) - .collect(); + let expected_intervals: Vec<_> = + (0..497).map(|i| (i / 2).max(1).min(expected_distance)).collect(); assert_eq!(intervals, expected_intervals); } fn run_until_max_interval(param: BackoffAuthoringOnFinalizedHeadLagging) -> (u64, u64) { let finalized_number = 0; - let mut head_state = HeadState { - head_number: 0, - head_slot: 0, - slot_now: 1, - }; + let mut head_state = HeadState { head_number: 0, head_slot: 0, slot_now: 1 }; let should_backoff = |head_state: &HeadState| -> bool { >>::should_backoff( @@ -1277,8 +1246,8 @@ mod test { }; // Number of blocks until we reach the max interval - let block_for_max_interval - = param.max_interval * param.authoring_bias + param.unfinalized_slack; + let block_for_max_interval = + param.max_interval * param.authoring_bias + param.unfinalized_slack; while head_state.head_number < block_for_max_interval { if should_backoff(&head_state) { @@ -1294,15 +1263,15 @@ mod test { } // Denoting - // C: unfinalized_slack - // M: authoring_bias - // X: max_interval + // C: unfinalized_slack + // M: authoring_bias + // X: max_interval // then the number of slots to reach the max interval can be computed from - // (start_slot + C) + M * sum(n, 1, X) + // (start_slot + C) + M * sum(n, 1, X) // or - // (start_slot + C) + M * X*(X+1)/2 + // (start_slot + C) + M * X*(X+1)/2 fn expected_time_to_reach_max_interval( - param: &BackoffAuthoringOnFinalizedHeadLagging + param: &BackoffAuthoringOnFinalizedHeadLagging, ) -> (u64, u64) { let c = param.unfinalized_slack; let m = 
param.authoring_bias; diff --git a/substrate/client/consensus/slots/src/slots.rs b/substrate/client/consensus/slots/src/slots.rs index 1e6dadcdf5cf50d7be7d6a5bf4492fa6a8df8735..d994aff1fc61212c3b93fc21fc869652b92ab88d 100644 --- a/substrate/client/consensus/slots/src/slots.rs +++ b/substrate/client/consensus/slots/src/slots.rs @@ -20,23 +20,21 @@ //! //! This is used instead of `futures_timer::Interval` because it was unreliable. -use super::{Slot, InherentDataProviderExt}; +use super::{InherentDataProviderExt, Slot}; use sp_consensus::{Error, SelectChain}; -use sp_inherents::{InherentData, CreateInherentDataProviders, InherentDataProvider}; +use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use std::time::{Duration, Instant}; use futures_timer::Delay; +use std::time::{Duration, Instant}; /// Returns current duration since unix epoch. pub fn duration_now() -> Duration { use std::time::SystemTime; let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| panic!( - "Current time {:?} is before unix epoch. Something is wrong: {:?}", - now, - e, - )) + now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| { + panic!("Current time {:?} is before unix epoch. Something is wrong: {:?}", now, e,) + }) } /// Returns the duration until the next slot from now. @@ -104,11 +102,7 @@ pub(crate) struct Slots { impl Slots { /// Create a new `Slots` stream. - pub fn new( - slot_duration: Duration, - create_inherent_data_providers: IDP, - client: C, - ) -> Self { + pub fn new(slot_duration: Duration, create_inherent_data_providers: IDP, client: C) -> Self { Slots { last_slot: 0.into(), slot_duration, @@ -135,7 +129,7 @@ where // schedule wait. let wait_dur = time_until_next_slot(self.slot_duration); Some(Delay::new(wait_dur)) - } + }, Some(d) => Some(d), }; @@ -161,11 +155,12 @@ where ); // Let's try at the next slot.. self.inner_delay.take(); - continue; - } + continue + }, }; - let inherent_data_providers = self.create_inherent_data_providers + let inherent_data_providers = self + .create_inherent_data_providers .create_inherent_data_providers(chain_head.hash(), ()) .await?; diff --git a/substrate/client/consensus/uncles/src/lib.rs b/substrate/client/consensus/uncles/src/lib.rs index cfae0528a627d4b20ee15b5a8f0c565475a4eafa..368a994cfe520ad5157c5b2227559664f457cf0f 100644 --- a/substrate/client/consensus/uncles/src/lib.rs +++ b/substrate/client/consensus/uncles/src/lib.rs @@ -19,7 +19,7 @@ //! Uncles functionality for Substrate. use sc_client_api::ProvideUncles; -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -34,7 +34,8 @@ const MAX_UNCLE_GENERATIONS: u32 = 8; pub fn create_uncles_inherent_data_provider( client: &C, parent: B::Hash, -) -> Result, sc_client_api::blockchain::Error> where +) -> Result, sc_client_api::blockchain::Error> +where B: BlockT, C: ProvideUncles, { diff --git a/substrate/client/db/src/bench.rs b/substrate/client/db/src/bench.rs index 4b34182a1c3bd75080f04e75e5ec8e57b476df74..c21119bd1176ffa9689164dceae93bcb6088e71a 100644 --- a/substrate/client/db/src/bench.rs +++ b/substrate/client/db/src/bench.rs @@ -18,27 +18,31 @@ //! 
State backend that's useful for benchmarking -use std::sync::Arc; -use std::cell::{Cell, RefCell}; -use std::collections::HashMap; +use std::{ + cell::{Cell, RefCell}, + collections::HashMap, + sync::Arc, +}; -use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use hash_db::{Hasher, Prefix}; +use kvdb::{DBTransaction, KeyValueDB}; use sp_core::{ + hexdisplay::HexDisplay, storage::{ChildInfo, TrackedStorageKey}, - hexdisplay::HexDisplay }; -use sp_runtime::traits::{Block as BlockT, HashFor}; -use sp_runtime::Storage; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + Storage, +}; use sp_state_machine::{ - DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, ProofRecorder, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, + StorageCollection, }; -use kvdb::{KeyValueDB, DBTransaction}; -use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; +use sp_trie::{prefixed_key, MemoryDB}; -type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +type DbState = + sp_state_machine::TrieBackend>>, HashFor>; type State = CachingState, B>; @@ -53,14 +57,17 @@ impl sp_state_machine::Storage> for StorageDb>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { - return Ok(v.clone()); + return Ok(v.clone()) } - let backend_value = self.db.get(0, &prefixed_key) + let backend_value = self + .db + .get(0, &prefixed_key) .map_err(|e| format!("Database backend error: {:?}", e))?; recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } else { - self.db.get(0, &prefixed_key) + self.db + .get(0, &prefixed_key) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -91,7 +98,11 @@ pub struct BenchmarkingState { impl BenchmarkingState { /// Create a new instance that creates a database in a temporary dir. 
- pub fn new(genesis: Storage, _cache_size_mb: Option, record_proof: bool) -> Result { + pub fn new( + genesis: Storage, + _cache_size_mb: Option, + record_proof: bool, + ) -> Result { let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); @@ -114,14 +125,17 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; - let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( - genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), - child_delta, - ); + let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + ) + }); + let (root, transaction): (B::Hash, _) = + state.state.borrow_mut().as_mut().unwrap().full_storage_root( + genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), + child_delta, + ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); state.commit(root, transaction, Vec::new(), Vec::new())?; @@ -143,12 +157,12 @@ impl BenchmarkingState { let storage_db = Arc::new(StorageDb:: { db, proof_recorder: self.proof_recorder.clone(), - _block: Default::default() + _block: Default::default(), }); *self.state.borrow_mut() = Some(State::new( DbState::::new(storage_db, self.root.get()), self.shared_cache.clone(), - None + None, )); Ok(()) } @@ -178,7 +192,7 @@ impl BenchmarkingState { let key_tracker = if let Some(childtrie) = childtrie { child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) - } else { + } else { &mut main_key_tracker }; @@ -193,7 +207,7 @@ impl BenchmarkingState { let should_log = !tracker.has_been_read(); tracker.add_read(); should_log - } + }, }; if should_log { @@ -215,7 +229,7 @@ impl BenchmarkingState { let key_tracker = if let Some(childtrie) = childtrie { child_key_tracker.entry(childtrie.to_vec()).or_insert_with(|| HashMap::new()) - } else { + } else { &mut main_key_tracker }; @@ -231,7 +245,7 @@ impl BenchmarkingState { let should_log = !tracker.has_been_written(); tracker.add_write(); should_log - } + }, }; if should_log { @@ -269,7 +283,7 @@ fn state_err() -> String { } impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; @@ -289,7 +303,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result>, Self::Error> { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -303,7 +321,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? 
+ .exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -317,7 +339,11 @@ impl StateBackend> for BenchmarkingState { key: &[u8], ) -> Result>, Self::Error> { self.add_read_key(Some(child_info.storage_key()), key); - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -340,8 +366,13 @@ impl StateBackend> for BenchmarkingState { f: F, allow_missing: bool, ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)? - .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + self.state.borrow().as_ref().ok_or_else(state_err)?.apply_to_key_values_while( + child_info, + prefix, + start_at, + f, + allow_missing, + ) } fn apply_to_keys_while bool>( @@ -368,17 +399,29 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta)) + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.storage_root(delta)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -389,17 +432,16 @@ impl StateBackend> for BenchmarkingState { self.state.borrow().as_ref().map_or(Default::default(), |s| s.keys(prefix)) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { + fn as_trie_backend( + &mut self, + ) -> Option<&sp_state_machine::TrieBackend>> { None } @@ -425,7 +467,8 @@ impl StateBackend> for BenchmarkingState { let mut record = self.record.take(); record.extend(keys); self.record.set(record); - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; self.root.set(storage_root); self.db.set(Some(db)); @@ -455,7 +498,8 @@ impl StateBackend> for BenchmarkingState { None => db_transaction.delete(0, &key), } } - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; self.db.set(Some(db)); } @@ -519,24 +563,20 @@ impl StateBackend> for BenchmarkingState { let reads = tracker.reads.min(1); let writes = tracker.writes.min(1); if let Some(prefix_tracker) = prefix_key_tracker.get_mut(&prefix) { - prefix_tracker.0 += reads; - prefix_tracker.1 += writes; + prefix_tracker.0 += reads; + prefix_tracker.1 += 
writes; } else { - prefix_key_tracker.insert( - prefix, - ( - reads, - writes, - tracker.whitelisted, - ), - ); + prefix_key_tracker.insert(prefix, (reads, writes, tracker.whitelisted)); } } }); - prefix_key_tracker.iter().map(|(key, tracker)| -> (Vec, u32, u32, bool) { + prefix_key_tracker + .iter() + .map(|(key, tracker)| -> (Vec, u32, u32, bool) { (key.to_vec(), tracker.0, tracker.1, tracker.2) - }).collect::>() + }) + .collect::>() } fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) { @@ -544,7 +584,10 @@ impl StateBackend> for BenchmarkingState { } fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) + self.state + .borrow() + .as_ref() + .map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } fn proof_size(&self) -> Option { @@ -585,8 +628,8 @@ mod test { #[test] fn read_to_main_and_child_tries() { - let bench_state = BenchmarkingState::::new(Default::default(), None, false) - .unwrap(); + let bench_state = + BenchmarkingState::::new(Default::default(), None, false).unwrap(); for _ in 0..2 { let child1 = sp_core::storage::ChildInfo::new_default(b"child1"); @@ -600,16 +643,14 @@ mod test { bench_state.child_storage(&child1, b"bar").unwrap(); bench_state.child_storage(&child2, b"bar").unwrap(); - bench_state.commit( - Default::default(), - Default::default(), - vec![ - ("foo".as_bytes().to_vec(), None) - ], - vec![ - ("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)]) - ] - ).unwrap(); + bench_state + .commit( + Default::default(), + Default::default(), + vec![("foo".as_bytes().to_vec(), None)], + vec![("child1".as_bytes().to_vec(), vec![("foo".as_bytes().to_vec(), None)])], + ) + .unwrap(); let rw_tracker = bench_state.read_write_count(); assert_eq!(rw_tracker.0, 6); diff --git a/substrate/client/db/src/cache/list_cache.rs b/substrate/client/db/src/cache/list_cache.rs index 341105b16a5b3f324fa35102a4e6085e8a6d1a78..9499ae2a89f454c2c2d4f1918fdd3587f6813456 100644 --- a/substrate/client/db/src/cache/list_cache.rs +++ b/substrate/client/db/src/cache/list_cache.rs @@ -41,18 +41,18 @@ //! Finalized entry E1 is pruned when block B is finalized so that: //! EntryAt(B.number - prune_depth).points_to(E1) -use std::collections::{BTreeSet, BTreeMap}; +use std::collections::{BTreeMap, BTreeSet}; use log::warn; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, Zero, Bounded, CheckedSub -}; +use sp_runtime::traits::{Block as BlockT, Bounded, CheckedSub, NumberFor, Zero}; -use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; -use crate::cache::list_entry::{Entry, StorageEntry}; -use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; +use crate::cache::{ + list_entry::{Entry, StorageEntry}, + list_storage::{Metadata, Storage, StorageTransaction}, + CacheItemT, ComplexBlockId, EntryType, +}; /// Pruning strategy. 
#[derive(Debug, Clone, Copy)] @@ -132,8 +132,8 @@ impl> ListCache pruning_strategy: PruningStrategy>, best_finalized_block: ComplexBlockId, ) -> ClientResult { - let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta))?; + let (best_finalized_entry, unfinalized) = + storage.read_meta().and_then(|meta| read_forks(&storage, meta))?; Ok(ListCache { storage, @@ -167,7 +167,7 @@ impl> ListCache // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? { - return Err(ClientError::NotInFinalizedChain); + return Err(ClientError::NotInFinalizedChain) } self.best_finalized_entry.as_ref() @@ -184,18 +184,21 @@ impl> ListCache match self.find_unfinalized_fork(&at)? { Some(fork) => Some(&fork.head), None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block( - &self.storage, - &at, - &best_finalized_entry.valid_from, - )? => Some(best_finalized_entry), + Some(best_finalized_entry) + if chain::is_connected_to_block( + &self.storage, + &at, + &best_finalized_entry.valid_from, + )? => + Some(best_finalized_entry), _ => None, }, } }; match head { - Some(head) => head.search_best_before(&self.storage, at.number) + Some(head) => head + .search_best_before(&self.storage, at.number) .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), None => Ok(None), } @@ -213,7 +216,8 @@ impl> ListCache entry_type: EntryType, operations: &mut CommitOperations, ) -> ClientResult<()> { - Ok(operations.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) + Ok(operations + .append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) } /// When previously inserted block is finalized. 
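The rewraps in this file all follow a handful of mechanical rules rather than hand edits: crate-level `use` statements are merged and reordered, call chains that overflow the line limit break one segment per line, trailing semicolons disappear after `return`/`break`/`continue`, and block-bodied match arms pick up a trailing comma. Below is a minimal sketch of code already in the output shape, with the rustfmt options that plausibly drive each rule named in the comments; the config file itself is not part of these hunks, so treat the exact option names and values as inferred rather than confirmed:

    use std::collections::{BTreeMap, BTreeSet}; // merged + reordered, as with `imports_granularity = "Crate"`

    fn classify(set: &BTreeSet<u64>, limits: &BTreeMap<u64, u64>, key: u64) -> Result<u64, String> {
        // A chain past the width limit (e.g. `max_width = 100`) breaks one segment per line:
        let limit = limits
            .get(&key)
            .copied()
            .ok_or_else(|| String::from("no limit configured for this key"))?;
        if !set.contains(&key) {
            return Err(String::from("unknown key")) // `trailing_semicolon = false` drops the `;` here
        }
        match key.cmp(&limit) {
            std::cmp::Ordering::Greater => {
                Err(String::from("over limit"))
            }, // `match_block_trailing_comma = true` adds this comma
            _ => Ok(limit),
        }
    }

The same rules account for the heavier rewraps in the test modules further down, where multi-line `assert_eq!` arguments are broken vertically at the width limit.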
@@ -242,25 +246,25 @@ impl> ListCache for op in ops.operations { match op { CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); + CommitOperation holds valid references while cache is locked; qed", + ); fork.best_block = Some(best_block); }, CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); + CommitOperation holds valid references while cache is locked; qed", + ); fork.best_block = Some(entry.valid_from.clone()); fork.head = entry; }, CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); + self.unfinalized + .push(Fork { best_block: Some(entry.valid_from.clone()), head: entry }); }, CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { self.best_finalized_block = block; @@ -275,7 +279,9 @@ impl> ListCache for (fork_index, updated_fork) in forks.into_iter().rev() { match updated_fork { Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { self.unfinalized.remove(fork_index); }, + None => { + self.unfinalized.remove(fork_index); + }, } } }, @@ -296,17 +302,17 @@ impl> ListCache let prev_operation = operations.operations.last(); debug_assert!( entry_type != EntryType::Final || - self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } ); // we do not store any values behind finalized if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { - return Ok(None); + return Ok(None) } // if the block is not final, it is possibly appended to/forking from existing unfinalized fork @@ -316,14 +322,14 @@ impl> ListCache // when value hasn't changed and block isn't final, there's nothing we need to do if value.is_none() { - return Ok(None); + return Ok(None) } // first: try to find fork that is known to has the best block we're appending to for (index, fork) in self.unfinalized.iter().enumerate() { if fork.try_append(&parent) { fork_and_action = Some((index, ForkAppendResult::Append)); - break; + break } } @@ -331,11 +337,14 @@ impl> ListCache // - we're appending to the fork for the first time after restart; // - we're forking existing unfinalized fork from the middle; if fork_and_action.is_none() { - let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); + let best_finalized_entry_block = + self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); for (index, fork) in self.unfinalized.iter().enumerate() { - if let Some(action) = fork.try_append_or_fork(&self.storage, 
&parent, best_finalized_entry_block)? { + if let Some(action) = + fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? + { fork_and_action = Some((index, action)); - break; + break } } } @@ -350,9 +359,14 @@ impl> ListCache }; tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); + let operation = + CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)) }, // fork from the middle of unfinalized fork Some((_, ForkAppendResult::Fork(prev_valid_from))) => { @@ -363,9 +377,14 @@ impl> ListCache }; tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); + let operation = + CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)) }, None => (), } @@ -389,12 +408,17 @@ impl> ListCache return Ok(match new_storage_entry { Some(new_storage_entry) => { tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); + let operation = + CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); Some(operation) }, None => None, - }); + }) } // cleanup database from abandoned unfinalized forks and obsolete finalized entries @@ -404,7 +428,11 @@ impl> ListCache match new_storage_entry { Some(new_storage_entry) => { tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks); + let operation = CommitOperation::BlockFinalized( + block.clone(), + Some(new_storage_entry.into_entry(block)), + abandoned_forks, + ); tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); Ok(Some(operation)) }, @@ -423,16 +451,16 @@ impl> ListCache let prev_operation = operations.operations.last(); debug_assert!( self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } ); // there could be at most one entry that is finalizing - let finalizing_entry = self.storage.read_entry(&block)? 
- .map(|entry| entry.into_entry(block.clone())); + let finalizing_entry = + self.storage.read_entry(&block)?.map(|entry| entry.into_entry(block.clone())); // cleanup database from abandoned unfinalized forks and obsolete finalized entries let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); @@ -457,12 +485,13 @@ impl> ListCache for (index, fork) in self.unfinalized.iter().enumerate() { // we only need to truncate fork if its head is ancestor of truncated block if fork.head.valid_from.number < reverted_block.number { - continue; + continue } // we only need to truncate fork if its head is connected to truncated block - if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? { - continue; + if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? + { + continue } let updated_fork = fork.truncate( @@ -485,7 +514,7 @@ impl> ListCache fn prune_finalized_entries>( &self, tx: &mut Tx, - block: &ComplexBlockId + block: &ComplexBlockId, ) { let prune_depth = match self.pruning_strategy { PruningStrategy::ByDepth(prune_depth) => prune_depth, @@ -515,18 +544,13 @@ impl> ListCache }; // truncate ancient entry - tx.insert_storage_entry(&ancient_block, &StorageEntry { - prev_valid_from: None, - value: current_entry.value, - }); + tx.insert_storage_entry( + &ancient_block, + &StorageEntry { prev_valid_from: None, value: current_entry.value }, + ); // destroy 'fork' ending with previous entry - destroy_fork( - first_entry_to_truncate, - &self.storage, - tx, - None, - ) + destroy_fork(first_entry_to_truncate, &self.storage, tx, None) }; if let Err(error) = do_pruning() { @@ -543,16 +567,17 @@ impl> ListCache ) -> BTreeSet { // if some block has been finalized already => take it into account let prev_abandoned_forks = match prev_operation { - Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => Some(abandoned_forks), + Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => + Some(abandoned_forks), _ => None, }; let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); - let live_unfinalized = self.unfinalized.iter() - .enumerate() - .filter(|(idx, _)| prev_abandoned_forks + let live_unfinalized = self.unfinalized.iter().enumerate().filter(|(idx, _)| { + prev_abandoned_forks .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) - .unwrap_or(true)); + .unwrap_or(true) + }); for (index, fork) in live_unfinalized { if fork.head.valid_from.number == block.number { destroyed.insert(index); @@ -574,7 +599,7 @@ impl> ListCache ) -> ClientResult>> { for unfinalized in &self.unfinalized { if unfinalized.matches(&self.storage, block)? { - return Ok(Some(&unfinalized)); + return Ok(Some(&unfinalized)) } } @@ -597,7 +622,8 @@ impl Fork { let range = self.head.search_best_range_before(storage, block.number)?; match range { None => Ok(false), - Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), + Some((begin, end)) => + chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), } } @@ -628,19 +654,19 @@ impl Fork { // check if the parent is connected to the beginning of the range if !chain::is_connected_to_block(storage, parent, &begin)? { - return Ok(None); + return Ok(None) } // the block is connected to the begin-entry. 
If begin is the head entry // => we need to append new block to the fork if begin == self.head.valid_from { - return Ok(Some(ForkAppendResult::Append)); + return Ok(Some(ForkAppendResult::Append)) } // the parent block belongs to this fork AND it is located after last finalized entry // => we need to make a new fork if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) { - return Ok(Some(ForkAppendResult::Fork(begin))); + return Ok(Some(ForkAppendResult::Fork(begin))) } Ok(None) @@ -653,12 +679,7 @@ impl Fork { tx: &mut Tx, best_finalized_block: Option>, ) -> ClientResult<()> { - destroy_fork( - self.head.valid_from.clone(), - storage, - tx, - best_finalized_block, - ) + destroy_fork(self.head.valid_from.clone(), storage, tx, best_finalized_block) } /// Truncate fork by deleting all entries that are descendants of given block. @@ -674,18 +695,15 @@ impl Fork { // read pointer to previous entry let entry = storage.require_entry(&current)?; - // truncation stops when we have reached the ancestor of truncated block + // truncation stops when we have reached the ancestor of truncated block if current.number < reverting_block { // if we have reached finalized block => destroy fork if chain::is_finalized_block(storage, &current, best_finalized_block)? { - return Ok(None); + return Ok(None) } // else fork needs to be updated - return Ok(Some(Fork { - best_block: None, - head: entry.into_entry(current), - })); + return Ok(Some(Fork { best_block: None, head: entry.into_entry(current) })) } tx.remove_storage_entry(&current); @@ -707,7 +725,9 @@ impl Default for CommitOperations { // This should never be allowed for non-test code to avoid revealing its internals. #[cfg(test)] -impl From>> for CommitOperations { +impl From>> + for CommitOperations +{ fn from(operations: Vec>) -> Self { CommitOperations { operations } } @@ -725,30 +745,36 @@ impl CommitOperations { Some(last_operation) => last_operation, None => { self.operations.push(new_operation); - return; + return }, }; // we are able (and obliged to) to merge two consequent block finalization operations match last_operation { - CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { - match new_operation { - CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => { - self.operations.push(CommitOperation::BlockFinalized( - new_finalized_block, - new_finalized_entry, - new_abandoned_forks, - )); - }, - _ => { - self.operations.push(CommitOperation::BlockFinalized( - old_finalized_block, - old_finalized_entry, - old_abandoned_forks, - )); - self.operations.push(new_operation); - }, - } + CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + ) => match new_operation { + CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + ) => { + self.operations.push(CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + )); + }, + _ => { + self.operations.push(CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + )); + self.operations.push(new_operation); + }, }, _ => { self.operations.push(last_operation); @@ -759,7 +785,12 @@ impl CommitOperations { }
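The densest hunk above, `CommitOperations::append`, is behaviour-preserving: the rule it encodes is simply that a finalization operation directly following another finalization replaces it, so at most one `BlockFinalized` survives at the tail of the list. A reduced model of that merge rule, with the operation payloads shrunk to plain numbers (illustration only, not the crate's actual types):

    #[derive(Debug, PartialEq)]
    enum Op {
        AppendNewBlock(u64),
        BlockFinalized(u64),
    }

    struct Ops(Vec<Op>);

    impl Ops {
        fn append(&mut self, new_op: Op) {
            match (self.0.pop(), new_op) {
                // two consecutive finalizations merge: only the newest survives
                (Some(Op::BlockFinalized(_)), Op::BlockFinalized(new)) =>
                    self.0.push(Op::BlockFinalized(new)),
                // otherwise put the popped operation back and append the new one
                (Some(last), new_op) => {
                    self.0.push(last);
                    self.0.push(new_op);
                },
                (None, new_op) => self.0.push(new_op),
            }
        }
    }

    fn main() {
        let mut ops = Ops(Vec::new());
        ops.append(Op::BlockFinalized(10));
        ops.append(Op::BlockFinalized(11));
        assert_eq!(ops.0, vec![Op::BlockFinalized(11)]); // the older finalization is gone
        ops.append(Op::AppendNewBlock(12));
        assert_eq!(ops.0.len(), 2);
    }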
/// Destroy fork by deleting all unfinalized entries. -pub fn destroy_fork, Tx: StorageTransaction>( +pub fn destroy_fork< + Block: BlockT, + T: CacheItemT, + S: Storage, + Tx: StorageTransaction, +>( head_valid_from: ComplexBlockId, storage: &S, tx: &mut Tx, @@ -770,7 +801,7 @@ pub fn destroy_fork, Tx: Stor // optionally: deletion stops when we found entry at finalized block if let Some(best_finalized_block) = best_finalized_block { if chain::is_finalized_block(storage, &current, best_finalized_block)? { - return Ok(()); + return Ok(()) } } @@ -788,8 +819,8 @@ pub fn destroy_fork, Tx: Stor /// Blockchain related functions. mod chain { - use sp_runtime::traits::Header as HeaderT; use super::*; + use sp_runtime::traits::Header as HeaderT; /// Is the block1 connected both ends of the range. pub fn is_connected_to_range>( storage: &S, block: (&ComplexBlockId, Option<&ComplexBlockId>), range: (&ComplexBlockId, Option<&ComplexBlockId>), ) -> ClientResult { let (begin, end) = range; - Ok(is_connected_to_block(storage, block, begin)? - && match end { + Ok(is_connected_to_block(storage, block, begin)? && + match end { Some(end) => is_connected_to_block(storage, block, end)?, None => true, }) @@ -812,10 +843,12 @@ mod chain { block2: &ComplexBlockId, ) -> ClientResult { let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; - let mut current = storage.read_header(&end.hash)? + let mut current = storage + .read_header(&end.hash)? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; while *current.number() > begin.number { - current = storage.read_header(current.parent_hash())? + current = storage + .read_header(current.parent_hash())? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; } @@ -829,11 +862,10 @@ mod chain { best_finalized_block: NumberFor, ) -> ClientResult { if block.number > best_finalized_block { - return Ok(false); + return Ok(false) } - storage.read_id(block.number) - .map(|hash| hash.as_ref() == Some(&block.hash)) + storage.read_id(block.number).map(|hash| hash.as_ref() == Some(&block.hash)) } } @@ -843,17 +875,19 @@ fn read_forks>( meta: Metadata, ) -> ClientResult<(Option>, Vec>)> { let finalized = match meta.finalized { - Some(finalized) => Some(storage.require_entry(&finalized)? 
- .into_entry(finalized)), + Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)), None => None, }; - let unfinalized = meta.unfinalized.into_iter() - .map(|unfinalized| storage.require_entry(&unfinalized) - .map(|storage_entry| Fork { + let unfinalized = meta + .unfinalized + .into_iter() + .map(|unfinalized| { + storage.require_entry(&unfinalized).map(|storage_entry| Fork { best_block: None, head: storage_entry.into_entry(unfinalized), - })) + }) + }) .collect::>()?; Ok((finalized, unfinalized)) @@ -861,10 +895,10 @@ fn read_forks>( #[cfg(test)] mod tests { - use substrate_test_runtime_client::runtime::H256; - use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; use super::*; + use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use substrate_test_runtime_client::runtime::H256; type Block = RawBlock>; @@ -882,7 +916,11 @@ mod tests { fn test_header(number: u64) -> Header { Header { - parent_hash: if number == 0 { Default::default() } else { test_header(number - 1).hash() }, + parent_hash: if number == 0 { + Default::default() + } else { + test_header(number - 1).hash() + }, number, state_root: Default::default(), extrinsics_root: Default::default(), @@ -909,28 +947,54 @@ mod tests { // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] - assert!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap().value_at_block(&test_id(50)).is_err()); + assert!(ListCache::<_, u64, _>::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .is_err()); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .unwrap(), + Some((test_id(30), Some(test_id(100)), 30)) + ); // when block is the best finalized block AND value is some // ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(100, H256::from_low_u64_be(100)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), 
Vec::new()) + .with_id(100, H256::from_low_u64_be(100)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(100)) + .unwrap(), + Some((test_id(100), None, 100)) + ); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -938,81 +1002,138 @@ mod tests { DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).is_err()); + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) + .is_err()); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } + ), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(200)) + .unwrap(), + Some((test_id(100), None, 100)) + ); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some // --- 3 // ---> [2] /---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) + ); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 // 1 /---> [2] ---------> [4] - 
assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 1, 3)).unwrap(), None); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_header(test_header(1)) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 2)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 1, 3)) + .unwrap(), + None + ); // when block is later than last finalized block AND it appends to unfinalized fork from the end // AND unfinalized value is Some // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&correct_id(5)) + .unwrap(), + Some((correct_id(4), None, 4)) + ); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } + ) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + 
.unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) + ); } #[test] @@ -1022,7 +1143,8 @@ mod tests { // when trying to insert block < finalized number let mut ops = Default::default(); - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap() .do_on_block_insert( &mut DummyTransaction::new(), test_id(49), @@ -1030,9 +1152,12 @@ mod tests { Some(50), nfin, &mut ops, - ).unwrap().is_none()); + ) + .unwrap() + .is_none()); // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap() .do_on_block_insert( &mut DummyTransaction::new(), test_id(99), @@ -1040,7 +1165,9 @@ mod tests { Some(100), nfin, &Default::default(), - ).unwrap().is_none()); + ) + .unwrap() + .is_none()); // when trying to insert non-final block AND it appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block @@ -1048,12 +1175,23 @@ mod tests { DummyStorage::new() .with_meta(None, vec![test_id(4)]) .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(4), + nfin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::AppendNewBlock(0, test_id(5))), ); assert!(tx.inserted_entries().is_empty()); @@ -1063,12 +1201,24 @@ mod tests { // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(5), + nfin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), ); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }) + ); // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block @@ -1077,18 +1227,22 @@ mod tests { .with_meta(None, vec![correct_id(4)]) .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(4), - nfin, - &Default::default(), - ).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(4), + nfin, + &Default::default(), + ) + .unwrap(), 
Some(CommitOperation::AppendNewBlock(0, correct_id(5))), ); assert!(tx.inserted_entries().is_empty()); @@ -1098,40 +1252,64 @@ mod tests { // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(5), - nfin, - &Default::default(), - ).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(5), + nfin, + &Default::default(), + ) + .unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), ); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }) + ); // when trying to insert non-final block AND it forks unfinalized fork let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) + .with_entry( + correct_id(4), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }, + ) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(3), + fork_id(0, 3, 4), + Some(14), + nfin, + &Default::default() + ) .unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), ); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] + }) + ); // when trying to insert non-final block AND there are no unfinalized forks // AND value is the same as last finalized @@ -1139,11 +1317,21 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + nfin, + &Default::default() + ) .unwrap(), None, ); @@ -1156,23 +1344,46 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), 
nfin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + nfin, + &Default::default() + ) .unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }) + ); // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); + let cache = + ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()) + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), @@ -1182,17 +1393,31 @@ mod tests { ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) + ); // when inserting finalized entry AND value is the same as in previous finalized let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), ); assert!(tx.inserted_entries().is_empty()); @@ -1201,7 +1426,16 @@ mod tests { // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()).unwrap(), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1210,7 +1444,10 @@ mod tests { ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) + ); // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted let cache = ListCache::new( @@ -1218,12 +1455,27 @@ mod tests { .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) 
.with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )), ); } @@ -1234,12 +1486,19 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), ); assert!(tx.inserted_entries().is_empty()); @@ -1253,12 +1512,19 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(4) - ).unwrap(); + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(4), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()).unwrap(), + cache + .do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(5), Some(Entry { valid_from: correct_id(5), value: 5 }), @@ -1267,19 +1533,30 @@ mod tests { ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }) + ); // finalization removes abandoned forks let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, 
vec![0].into_iter().collect())), + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )), ); } @@ -1289,34 +1566,50 @@ mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, + ) + .with_entry( + correct_id(6), + StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); // when new block is appended to unfinalized fork cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork - cache.on_transaction_commit(vec![ - CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), - ].into()); + cache.on_transaction_commit( + vec![CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })] + .into(), + ); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added - cache.on_transaction_commit(vec![ - CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), - ].into()); + cache.on_transaction_commit( + vec![CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })] + .into(), + ); assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(vec![CommitOperation::BlockFinalized( - correct_id(20), - Some(Entry { valid_from: correct_id(20), value: 20 }), - vec![0, 1, 2].into_iter().collect(), - )].into()); + cache.on_transaction_commit( + vec![CommitOperation::BlockFinalized( + correct_id(20), + Some(Entry { valid_from: correct_id(20), value: 20 }), + vec![0, 1, 2].into_iter().collect(), + )] + .into(), + ); assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); + assert_eq!( + cache.best_finalized_entry, + Some(Entry { valid_from: correct_id(20), value: 20 }) + ); assert!(cache.unfinalized.is_empty()); } @@ -1324,45 +1617,88 @@ mod tests { fn list_find_unfinalized_fork_works() { // ----------> [3] // --- [2] ---------> 4 ---> [5] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), 
correct_id(0) - ).unwrap().find_unfinalized_fork((&correct_id(4)).into()).unwrap().unwrap().head.valid_from, correct_id(5)); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&correct_id(4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + correct_id(5) + ); // --- [2] ---------------> [5] // ----------> [3] ---> 4 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap() - .find_unfinalized_fork((&fork_id(0, 1, 4)).into()).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry( + correct_id(2), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 2)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(0, 1, 4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + fork_id(0, 1, 3) + ); // --- [2] ---------------> [5] // ----------> [3] // -----------------> 4 assert!(ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } + ) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } + ) + .with_entry( + correct_id(2), + StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } + ) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -1372,89 +1708,167 @@ mod tests { .with_header(fork_header(1, 1, 2)) .with_header(fork_header(1, 1, 3)) .with_header(fork_header(1, 1, 4)), - 
PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap().find_unfinalized_fork((&fork_id(1, 1, 4)).into()).unwrap().is_none()); + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(1, 1, 4)).into()) + .unwrap() + .is_none()); } #[test] fn fork_matches_works() { // when block is not within list range let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, (&test_id(20)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .matches(&storage, (&test_id(20)).into()) + .unwrap(), + false + ); // when block is not connected to the begin block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 2, 4)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&fork_id(0, 2, 4)).into()) + .unwrap(), + false + ); // when block is not connected to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 3, 4)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 3, 4)).into()).unwrap(), false); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&fork_id(0, 3, 4)).into()) + .unwrap(), + false + ); // when block is connected to the begin block AND end is open let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) .with_header(test_header(5)) .with_header(test_header(6)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(6)).into()).unwrap(), true); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&correct_id(6)).into()) + .unwrap(), + true + ); // when block is connected to the begin block AND to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { 
prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(4)).into()).unwrap(), true); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .matches(&storage, (&correct_id(4)).into()) + .unwrap(), + true + ); } #[test] fn fork_try_append_works() { // when best block is unknown - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), false); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append(&test_id(100)), + false + ); // when best block is known but different - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(101)), false); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append(&test_id(101)), + false + ); // when best block is known and the same - assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), true); + assert_eq!( + Fork::<_, u64> { + best_block: Some(test_id(100)), + head: Entry { valid_from: test_id(100), value: 0 } + } + .try_append(&test_id(100)), + true + ); } #[test] fn fork_try_append_or_fork_works() { // when there's no entry before parent let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry( + test_id(100), + StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, + ) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None); + assert_eq!( + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } + .try_append_or_fork(&storage, &test_id(30), None) + .unwrap(), + None + ); // when parent does not belong to the fork let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry( + correct_id(5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { valid_from: correct_id(5), value: 100 } + } + .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) + .unwrap(), + None + ); // when the entry before parent is the head entry let storage = DummyStorage::new() .with_entry( @@ -1463,30 +1877,57 @@ mod tests { ) .with_header(test_header(6)) .with_header(test_header(5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - 
.try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append));
+		assert_eq!(
+			Fork::<_, u64> {
+				best_block: None,
+				head: Entry { valid_from: correct_id(5), value: 100 }
+			}
+			.try_append_or_fork(&storage, &correct_id(6), None)
+			.unwrap(),
+			Some(ForkAppendResult::Append)
+		);

 		// when the parent located after last finalized entry
 		let storage = DummyStorage::new()
-			.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 })
+			.with_entry(
+				correct_id(6),
+				StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 },
+			)
 			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 })
 			.with_header(test_header(6))
 			.with_header(test_header(5))
 			.with_header(test_header(4))
 			.with_header(test_header(3))
 			.with_header(fork_header(0, 4, 5));
-		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } }
-			.try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))));
+		assert_eq!(
+			Fork::<_, u64> {
+				best_block: None,
+				head: Entry { valid_from: correct_id(6), value: 100 }
+			}
+			.try_append_or_fork(&storage, &fork_id(0, 4, 5), None)
+			.unwrap(),
+			Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3)))
+		);

 		// when the parent located before last finalized entry
 		let storage = DummyStorage::new()
-			.with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 })
+			.with_entry(
+				correct_id(6),
+				StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 },
+			)
 			.with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 })
 			.with_header(test_header(6))
 			.with_header(test_header(5))
 			.with_header(test_header(4))
 			.with_header(test_header(3))
 			.with_header(fork_header(0, 4, 5));
-		assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } }
-			.try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None);
+		assert_eq!(
+			Fork::<_, u64> {
+				best_block: None,
+				head: Entry { valid_from: correct_id(6), value: 100 }
+			}
+			.try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3))
+			.unwrap(),
+			None
+		);
 	}

 	#[test]
@@ -1495,12 +1936,16 @@ mod tests {
 		let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100));
 		let mut tx = DummyTransaction::new();
 		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } }
-			.destroy(&storage, &mut tx, Some(200)).unwrap();
+			.destroy(&storage, &mut tx, Some(200))
+			.unwrap();
 		assert!(tx.removed_entries().is_empty());

 		// when we reach finalized entry with iterations
 		let storage = DummyStorage::new()
 			.with_id(10, H256::from_low_u64_be(10))
-			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
+			.with_entry(
+				test_id(100),
+				StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 },
+			)
 			.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 })
 			.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 })
 			.with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 })
@@ -1508,120 +1953,192 @@ mod tests {
 			.with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 });
 		let mut tx = DummyTransaction::new();
 		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } }
-			.destroy(&storage, &mut tx, Some(200)).unwrap();
-		assert_eq!(*tx.removed_entries(),
-			vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect());
+			.destroy(&storage, &mut tx, Some(200))
+			.unwrap();
+		assert_eq!(
+			*tx.removed_entries(),
+			vec![test_id(100).hash, test_id(50).hash, test_id(20).hash]
+				.into_iter()
+				.collect()
+		);

 		// when we reach beginning of fork before finalized block
 		let storage = DummyStorage::new()
 			.with_id(10, H256::from_low_u64_be(10))
-			.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
+			.with_entry(
+				test_id(100),
+				StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 },
+			)
 			.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 });
 		let mut tx = DummyTransaction::new();
 		Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } }
-			.destroy(&storage, &mut tx, Some(200)).unwrap();
-		assert_eq!(*tx.removed_entries(),
-			vec![test_id(100).hash, test_id(50).hash].into_iter().collect());
+			.destroy(&storage, &mut tx, Some(200))
+			.unwrap();
+		assert_eq!(
+			*tx.removed_entries(),
+			vec![test_id(100).hash, test_id(50).hash].into_iter().collect()
+		);
 	}

 	#[test]
 	fn is_connected_to_block_fails() {
 		// when storage returns error
-		assert!(
-			chain::is_connected_to_block::<_, u64, _>(
-				&FaultyStorage,
-				(&test_id(1)).into(),
-				&test_id(100),
-			).is_err(),
-		);
+		assert!(chain::is_connected_to_block::<_, u64, _>(
+			&FaultyStorage,
+			(&test_id(1)).into(),
+			&test_id(100),
+		)
+		.is_err(),);

 		// when there's no header in the storage
-		assert!(
-			chain::is_connected_to_block::<_, u64, _>(
-				&DummyStorage::new(),
-				(&test_id(1)).into(),
-				&test_id(100),
-			).is_err(),
-		);
+		assert!(chain::is_connected_to_block::<_, u64, _>(
+			&DummyStorage::new(),
+			(&test_id(1)).into(),
+			&test_id(100),
+		)
+		.is_err(),);
 	}

 	#[test]
 	fn is_connected_to_block_works() {
 		// when without iterations we end up with different block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(1)),
-			(&test_id(1)).into(), &correct_id(1)).unwrap(), false);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new().with_header(test_header(1)),
+				(&test_id(1)).into(),
+				&correct_id(1)
+			)
+			.unwrap(),
+			false
+		);

 		// when with ASC iterations we end up with different block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(0))
-			.with_header(test_header(1))
-			.with_header(test_header(2)),
-			(&test_id(0)).into(), &correct_id(2)).unwrap(), false);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new()
+					.with_header(test_header(0))
+					.with_header(test_header(1))
+					.with_header(test_header(2)),
+				(&test_id(0)).into(),
+				&correct_id(2)
+			)
+			.unwrap(),
+			false
+		);

 		// when with DESC iterations we end up with different block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(0))
-			.with_header(test_header(1))
-			.with_header(test_header(2)),
-			(&correct_id(2)).into(), &test_id(0)).unwrap(), false);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new()
+					.with_header(test_header(0))
+					.with_header(test_header(1))
+					.with_header(test_header(2)),
+				(&correct_id(2)).into(),
+				&test_id(0)
+			)
+			.unwrap(),
+			false
+		);

 		// when without iterations we end up with the same block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(1)),
-			(&correct_id(1)).into(), &correct_id(1)).unwrap(), true);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new().with_header(test_header(1)),
+				(&correct_id(1)).into(),
+				&correct_id(1)
+			)
+			.unwrap(),
+			true
+		);

 		// when with ASC iterations we end up with the same block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(0))
-			.with_header(test_header(1))
-			.with_header(test_header(2)),
-			(&correct_id(0)).into(), &correct_id(2)).unwrap(), true);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new()
+					.with_header(test_header(0))
+					.with_header(test_header(1))
+					.with_header(test_header(2)),
+				(&correct_id(0)).into(),
+				&correct_id(2)
+			)
+			.unwrap(),
+			true
+		);

 		// when with DESC iterations we end up with the same block
-		assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new()
-			.with_header(test_header(0))
-			.with_header(test_header(1))
-			.with_header(test_header(2)),
-			(&correct_id(2)).into(), &correct_id(0)).unwrap(), true);
+		assert_eq!(
+			chain::is_connected_to_block::<_, u64, _>(
+				&DummyStorage::new()
+					.with_header(test_header(0))
+					.with_header(test_header(1))
+					.with_header(test_header(2)),
+				(&correct_id(2)).into(),
+				&correct_id(0)
+			)
+			.unwrap(),
+			true
+		);
 	}

 	#[test]
 	fn is_finalized_block_fails() {
 		// when storage returns error
 		assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err());
-
 	}

 	#[test]
 	fn is_finalized_block_works() {
 		// when number of block is larger than last finalized block
-		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false);
+		assert_eq!(
+			chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(),
+			false
+		);

 		// when there's no hash for this block number in the database
-		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false);
+		assert_eq!(
+			chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(),
+			false
+		);

 		// when there's different hash for this block number in the database
-		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
-			.with_id(1, H256::from_low_u64_be(2)), &test_id(1), 100).unwrap(), false);
+		assert_eq!(
+			chain::is_finalized_block::<_, u64, _>(
+				&DummyStorage::new().with_id(1, H256::from_low_u64_be(2)),
+				&test_id(1),
+				100
+			)
+			.unwrap(),
+			false
+		);

 		// when there's the same hash for this block number in the database
-		assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new()
-			.with_id(1, H256::from_low_u64_be(1)), &test_id(1), 100).unwrap(), true);
+		assert_eq!(
+			chain::is_finalized_block::<_, u64, _>(
+				&DummyStorage::new().with_id(1, H256::from_low_u64_be(1)),
+				&test_id(1),
+				100
+			)
+			.unwrap(),
+			true
+		);
 	}

 	#[test]
 	fn read_forks_fails() {
 		// when storage returns error during finalized entry read
-		assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata {
-			finalized: Some(test_id(1)),
-			unfinalized: vec![],
-		}).is_err());
+		assert!(read_forks::<Block, u64, _>(
+			&FaultyStorage,
+			Metadata { finalized: Some(test_id(1)), unfinalized: vec![] }
+		)
+		.is_err());
 		// when storage returns error during unfinalized entry read
-		assert!(read_forks::<Block, u64, _>(&FaultyStorage, Metadata {
-			finalized: None,
-			unfinalized: vec![test_id(1)],
-		}).is_err());
+		assert!(read_forks::<Block, u64, _>(
+			&FaultyStorage,
+			Metadata { finalized: None, unfinalized: vec![test_id(1)] }
+		)
+		.is_err());
 		// when finalized entry is not found
-		assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata {
-			finalized: Some(test_id(1)),
-			unfinalized: vec![],
-		}).is_err());
+		assert!(read_forks::<Block, u64, _>(
+			&DummyStorage::new(),
+			Metadata { finalized: Some(test_id(1)), unfinalized: vec![] }
+		)
+		.is_err());
 		// when unfinalized entry is not found
-		assert!(read_forks::<Block, u64, _>(&DummyStorage::new(), Metadata {
-			finalized: None,
-			unfinalized: vec![test_id(1)],
-		}).is_err());
+		assert!(read_forks::<Block, u64, _>(
+			&DummyStorage::new(),
+			Metadata { finalized: None, unfinalized: vec![test_id(1)] }
+		)
+		.is_err());
 	}

 	#[test]
@@ -1638,23 +2155,40 @@ mod tests {
 			],
 		);

-		assert_eq!(expected, read_forks(&storage, Metadata {
-			finalized: Some(test_id(10)),
-			unfinalized: vec![test_id(20), test_id(30)],
-		}).unwrap());
+		assert_eq!(
+			expected,
+			read_forks(
+				&storage,
+				Metadata {
+					finalized: Some(test_id(10)),
+					unfinalized: vec![test_id(20), test_id(30)],
+				}
+			)
+			.unwrap()
+		);
 	}

 	#[test]
 	fn ancient_entries_are_pruned_when_pruning_enabled() {
 		fn do_test(strategy: PruningStrategy) {
-			let cache = ListCache::new(DummyStorage::new()
-				.with_id(10, H256::from_low_u64_be(10))
-				.with_id(20, H256::from_low_u64_be(20))
-				.with_id(30, H256::from_low_u64_be(30))
-				.with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 })
-				.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 })
-				.with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }),
-				strategy, test_id(9)).unwrap();
+			let cache = ListCache::new(
+				DummyStorage::new()
+					.with_id(10, H256::from_low_u64_be(10))
+					.with_id(20, H256::from_low_u64_be(20))
+					.with_id(30, H256::from_low_u64_be(30))
+					.with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 })
+					.with_entry(
+						test_id(20),
+						StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 },
+					)
+					.with_entry(
+						test_id(30),
+						StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 },
+					),
+				strategy,
+				test_id(9),
+			)
+			.unwrap();
 			let mut tx = DummyTransaction::new();

 			// when finalizing entry #10: no entries pruned
@@ -1678,7 +2212,10 @@
 				},
 				PruningStrategy::ByDepth(_) => {
 					assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect());
-					assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect());
+					assert_eq!(
+						*tx.inserted_entries(),
+						vec![test_id(20).hash].into_iter().collect()
+					);
 				},
 			}
 		}
@@ -1696,15 +2233,36 @@
 		// -> (3') -> 4' -> 5'
 		let mut cache = ListCache::new(
 			DummyStorage::new()
-				.with_meta(Some(correct_id(1)), vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)])
+				.with_meta(
+					Some(correct_id(1)),
+					vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)],
+				)
 				.with_id(1, correct_id(1).hash)
 				.with_entry(correct_id(1), StorageEntry { prev_valid_from: None, value: 1 })
-				.with_entry(correct_id(3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 })
-				.with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 })
-				.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 })
-				.with_entry(fork_id(1, 2, 4), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 })
-				.with_entry(fork_id(1, 2, 5), StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 })
-				.with_entry(fork_id(2, 4, 5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 })
+				.with_entry(
+					correct_id(3),
+					StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 },
+				)
+				.with_entry(
+					correct_id(4),
+					StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 },
+				)
+				.with_entry(
+					correct_id(5),
+					StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 },
+				)
+				.with_entry(
+					fork_id(1, 2, 4),
+					StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 },
+				)
+				.with_entry(
+					fork_id(1, 2, 5),
+					StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 },
+				)
+				.with_entry(
+					fork_id(2, 4, 5),
+					StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 },
+				)
 				.with_header(test_header(1))
 				.with_header(test_header(2))
 				.with_header(test_header(3))
@@ -1714,29 +2272,40 @@
 				.with_header(fork_header(1, 2, 4))
 				.with_header(fork_header(1, 2, 5))
 				.with_header(fork_header(2, 4, 5)),
-			PruningStrategy::ByDepth(1024), correct_id(1)
-		).unwrap();
+			PruningStrategy::ByDepth(1024),
+			correct_id(1),
+		)
+		.unwrap();

 		// when 5 is reverted: entry 5 is truncated
 		let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap();
-		assert_eq!(op, CommitOperation::BlockReverted(vec![
-			(0, Some(Fork { best_block: None, head: Entry { valid_from: correct_id(4), value: 4 } })),
-		].into_iter().collect()));
+		assert_eq!(
+			op,
+			CommitOperation::BlockReverted(
+				vec![(
+					0,
+					Some(Fork {
+						best_block: None,
+						head: Entry { valid_from: correct_id(4), value: 4 }
+					})
+				),]
+				.into_iter()
+				.collect()
+			)
+		);
 		cache.on_transaction_commit(vec![op].into());

 		// when 3 is reverted: entries 4+5' are truncated
 		let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap();
-		assert_eq!(op, CommitOperation::BlockReverted(vec![
-			(0, None),
-			(2, None),
-		].into_iter().collect()));
+		assert_eq!(
+			op,
+			CommitOperation::BlockReverted(vec![(0, None), (2, None),].into_iter().collect())
+		);
 		cache.on_transaction_commit(vec![op].into());

 		// when 2 is reverted: entries 4'+5' are truncated
 		let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap();
-		assert_eq!(op, CommitOperation::BlockReverted(vec![
-			(0, None),
-		].into_iter().collect()));
+		assert_eq!(op, CommitOperation::BlockReverted(vec![(0, None),].into_iter().collect()));
 		cache.on_transaction_commit(vec![op].into());
 	}
diff --git a/substrate/client/db/src/cache/list_entry.rs b/substrate/client/db/src/cache/list_entry.rs
index 94d4eb9f49b2739c3a0325da899c8a7b57dc3ad6..7cee7a5146260eb64365597194d0a5b049e147dc 100644
--- a/substrate/client/db/src/cache/list_entry.rs
+++ b/substrate/client/db/src/cache/list_entry.rs
@@ -18,12 +18,11 @@
 //! List-cache storage entries.

+use codec::{Decode, Encode};
 use sp_blockchain::Result as ClientResult;
 use sp_runtime::traits::{Block as BlockT, NumberFor};
-use codec::{Encode, Decode};

-use crate::cache::{CacheItemT, ComplexBlockId};
-use crate::cache::list_storage::{Storage};
+use crate::cache::{list_storage::Storage, CacheItemT, ComplexBlockId};

 /// Single list-based cache entry.
 #[derive(Debug)]
@@ -52,10 +51,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		match value {
 			Some(value) => match self.value == value {
 				true => None,
-				false => Some(StorageEntry {
-					prev_valid_from: Some(self.valid_from.clone()),
-					value,
-				}),
+				false =>
+					Some(StorageEntry { prev_valid_from: Some(self.valid_from.clone()), value }),
 			},
 			None => None,
 		}
@@ -67,7 +64,8 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		storage: &S,
 		block: NumberFor<Block>,
 	) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
-		Ok(self.search_best_before(storage, block)?
+		Ok(self
+			.search_best_before(storage, block)?
 			.map(|(entry, next)| (entry.valid_from, next)))
 	}

@@ -86,14 +84,14 @@ impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
 		let mut current = self.valid_from.clone();
 		if block >= self.valid_from.number {
 			let value = self.value.clone();
-			return Ok(Some((Entry { valid_from: current, value }, next)));
+			return Ok(Some((Entry { valid_from: current, value }, next)))
 		}

 		// else - travel back in time
 		loop {
 			let entry = storage.require_entry(&current)?;
 			if block >= current.number {
-				return Ok(Some((Entry { valid_from: current, value: entry.value }, next)));
+				return Ok(Some((Entry { valid_from: current, value: entry.value }, next)))
 			}

 			next = Some(current);
@@ -108,18 +106,15 @@ impl<Block: BlockT, T: CacheItemT> StorageEntry<Block, T> {
 	/// Converts storage entry into an entry, valid from given block.
 	pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
-		Entry {
-			valid_from,
-			value: self.value,
-		}
+		Entry { valid_from, value: self.value }
 	}
 }

 #[cfg(test)]
 mod tests {
-	use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
-	use substrate_test_runtime_client::runtime::{H256, Block};
 	use super::*;
+	use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
+	use substrate_test_runtime_client::runtime::{Block, H256};

 	fn test_id(number: u64) -> ComplexBlockId<Block> {
 		ComplexBlockId::new(H256::from_low_u64_be(number), number)
 	}
@@ -132,36 +127,61 @@ mod tests {
 		// when trying to update with the same Some value
 		assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None);
 		// when trying to update with different Some value
-		assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
-			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 }));
+		assert_eq!(
+			Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
+			Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })
+		);
 	}

 	#[test]
 	fn entry_search_best_before_fails() {
 		// when storage returns error
 		assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 }
-			.search_best_before(&FaultyStorage, 50).is_err());
+			.search_best_before(&FaultyStorage, 50)
+			.is_err());
 	}

 	#[test]
 	fn entry_search_best_before_works() {
 		// when block is better than our best block
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new(), 150).unwrap(),
-			Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None)));
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(&DummyStorage::new(), 150)
+				.unwrap(),
+			Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))
+		);
 		// when block is found between two entries
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new()
-				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
-				.with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }),
-			75).unwrap(),
-			Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100)))));
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(
+					&DummyStorage::new()
+						.with_entry(
+							test_id(100),
+							StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
+						)
+						.with_entry(
+							test_id(50),
+							StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }
+						),
+					75
+				)
+				.unwrap(),
+			Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))
+		);
 		// when block is not found
-		assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-			.search_best_before(&DummyStorage::new()
-				.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 })
-				.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
-			30).unwrap(),
-			None);
+		assert_eq!(
+			Entry::<_, u64> { valid_from: test_id(100), value: 100 }
+				.search_best_before(
+					&DummyStorage::new()
+						.with_entry(
+							test_id(100),
+							StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
+						)
+						.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
+					30
+				)
+				.unwrap(),
+			None
+		);
 	}
 }
diff --git a/substrate/client/db/src/cache/list_storage.rs b/substrate/client/db/src/cache/list_storage.rs
index e4b3677b4ab310de91323f0a208ec6a327c57115..bb47b8dab5a7f9219874e2024a3860143681874d 100644
--- a/substrate/client/db/src/cache/list_storage.rs
+++ b/substrate/client/db/src/cache/list_storage.rs
@@ -20,17 +20,23 @@
 use std::sync::Arc;

+use crate::utils::{self, meta_keys};
+use codec::{Decode, Encode};
 use sp_blockchain::{Error as ClientError, Result as ClientResult};
-use codec::{Encode, Decode};
-use sp_runtime::generic::BlockId;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
 use sp_database::{Database, Transaction};
-use crate::utils::{self, meta_keys};
-
-use crate::cache::{CacheItemT, ComplexBlockId};
-use crate::cache::list_cache::{CommitOperation, Fork};
-use crate::cache::list_entry::{Entry, StorageEntry};
-use crate::DbHash;
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor},
+};
+
+use crate::{
+	cache::{
+		list_cache::{CommitOperation, Fork},
+		list_entry::{Entry, StorageEntry},
+		CacheItemT, ComplexBlockId,
+	},
+	DbHash,
+};

 /// Single list-cache metadata.
 #[derive(Debug)]
@@ -54,14 +60,21 @@ pub trait Storage<Block: BlockT, T: CacheItemT> {
 	fn read_meta(&self) -> ClientResult<Metadata<Block, T>>;

 	/// Reads cache entry from the storage.
-	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>>;
+	fn read_entry(
+		&self,
+		at: &ComplexBlockId<Block>,
+	) -> ClientResult<Option<StorageEntry<Block, T>>>;

 	/// Reads referenced (and thus existing) cache entry from the storage.
 	fn require_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<StorageEntry<Block, T>> {
-		self.read_entry(at)
-			.and_then(|entry| entry
-				.ok_or_else(|| ClientError::from(
-					ClientError::Backend(format!("Referenced cache entry at {:?} is not found", at)))))
+		self.read_entry(at).and_then(|entry| {
+			entry.ok_or_else(|| {
+				ClientError::from(ClientError::Backend(format!(
+					"Referenced cache entry at {:?} is not found",
+					at
+				)))
+			})
+		})
 	}
 }

@@ -111,10 +124,14 @@ impl DbStorage {
 	}

 	/// Get reference to the database.
-	pub fn db(&self) -> &Arc<dyn Database<DbHash>> { &self.db }
+	pub fn db(&self) -> &Arc<dyn Database<DbHash>> {
+		&self.db
+	}

 	/// Get reference to the database columns.
-	pub fn columns(&self) -> &DbColumns { &self.columns }
+	pub fn columns(&self) -> &DbColumns {
+		&self.columns
+	}

 	/// Encode block id for storing as a key in cache column.
 	/// We append prefix to the actual encoding to allow several caches
@@ -128,25 +145,35 @@ impl DbStorage {

 impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DbStorage {
 	fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
-		utils::read_header::<Block>(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Number(at))
-			.map(|maybe_header| maybe_header.map(|header| header.hash()))
+		utils::read_header::<Block>(
+			&*self.db,
+			self.columns.key_lookup,
+			self.columns.header,
+			BlockId::Number(at),
+		)
+		.map(|maybe_header| maybe_header.map(|header| header.hash()))
 	}

 	fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
-		utils::read_header::<Block>(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Hash(*at))
+		utils::read_header::<Block>(
+			&*self.db,
+			self.columns.key_lookup,
+			self.columns.header,
+			BlockId::Hash(*at),
+		)
 	}

 	fn read_meta(&self) -> ClientResult<Metadata<Block, T>> {
 		match self.db.get(self.columns.meta, &self.meta_key) {
 			Some(meta) => meta::decode(&*meta),
-			None => Ok(Metadata {
-				finalized: None,
-				unfinalized: Vec::new(),
-			})
+			None => Ok(Metadata { finalized: None, unfinalized: Vec::new() }),
 		}
 	}

-	fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, T>>> {
+	fn read_entry(
+		&self,
+		at: &ComplexBlockId<Block>,
+	) -> ClientResult<Option<StorageEntry<Block, T>>> {
 		match self.db.get(self.columns.cache, &self.encode_block_id(at)) {
 			Some(entry) => StorageEntry::<Block, T>::decode(&mut &entry[..])
 				.map_err(|_| ClientError::Backend("Failed to decode cache entry".into()))
@@ -171,7 +198,11 @@ impl<'a> DbStorageTransaction<'a> {

 impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorageTransaction<'a> {
 	fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, entry: &StorageEntry<Block, T>) {
-		self.tx.set_from_vec(self.storage.columns.cache, &self.storage.encode_block_id(at), entry.encode());
+		self.tx.set_from_vec(
+			self.storage.columns.cache,
+			&self.storage.encode_block_id(at),
+			entry.encode(),
+		);
 	}

 	fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
@@ -187,7 +218,8 @@ impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction<Block, T> for DbStorag
 		self.tx.set_from_vec(
 			self.storage.columns.meta,
 			&self.storage.meta_key,
-			meta::encode(best_finalized_entry, unfinalized, operation));
+			meta::encode(best_finalized_entry, unfinalized, operation),
+		);
 	}
 }

@@ -206,10 +238,11 @@ mod meta {
 	pub fn encode<Block: BlockT, T: CacheItemT>(
 		best_finalized_entry: Option<&Entry<Block, T>>,
 		unfinalized: &[Fork<Block, T>],
-		op: &CommitOperation<Block, T>
+		op: &CommitOperation<Block, T>,
 	) -> Vec<u8> {
 		let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from);
-		let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();
+		let mut unfinalized =
+			unfinalized.iter().map(|fork| &fork.head().valid_from).collect::<Vec<_>>();

 		match op {
 			CommitOperation::AppendNewBlock(_, _) => (),
@@ -230,8 +263,11 @@ mod meta {
 			CommitOperation::BlockReverted(ref forks) => {
 				for (fork_index, updated_fork) in forks.iter().rev() {
 					match updated_fork {
-						Some(updated_fork) => unfinalized[*fork_index] = &updated_fork.head().valid_from,
-						None => { unfinalized.remove(*fork_index); },
+						Some(updated_fork) =>
+							unfinalized[*fork_index] = &updated_fork.head().valid_from,
+						None => {
+							unfinalized.remove(*fork_index);
+						},
 					}
 				}
 			},
@@ -243,10 +279,12 @@ mod meta {
 	/// Decode meta information.
 	pub fn decode<Block: BlockT, T: CacheItemT>(encoded: &[u8]) -> ClientResult<Metadata<Block, T>> {
 		let input = &mut &*encoded;
-		let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input)
-			.map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?;
-		let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input)
-			.map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?;
+		let finalized: Option<ComplexBlockId<Block>> = Decode::decode(input).map_err(|_| {
+			ClientError::from(ClientError::Backend("Error decoding cache meta".into()))
+		})?;
+		let unfinalized: Vec<ComplexBlockId<Block>> = Decode::decode(input).map_err(|_| {
+			ClientError::from(ClientError::Backend("Error decoding cache meta".into()))
+		})?;

 		Ok(Metadata { finalized, unfinalized })
 	}
@@ -254,8 +292,8 @@ mod meta {

 #[cfg(test)]
 pub mod tests {
-	use std::collections::{HashMap, HashSet};
 	use super::*;
+	use std::collections::{HashMap, HashSet};

 	pub struct FaultyStorage;

@@ -272,7 +310,10 @@ pub mod tests {
 			Err(ClientError::Backend("TestError".into()))
 		}

-		fn read_entry(&self, _at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, u64>>> {
+		fn read_entry(
+			&self,
+			_at: &ComplexBlockId<Block>,
+		) -> ClientResult<Option<StorageEntry<Block, u64>>> {
 			Err(ClientError::Backend("TestError".into()))
 		}
 	}
@@ -287,17 +328,18 @@ pub mod tests {
 	impl DummyStorage {
 		pub fn new() -> Self {
 			DummyStorage {
-				meta: Metadata {
-					finalized: None,
-					unfinalized: Vec::new(),
-				},
+				meta: Metadata { finalized: None, unfinalized: Vec::new() },
 				ids: HashMap::new(),
 				headers: HashMap::new(),
 				entries: HashMap::new(),
 			}
 		}

-		pub fn with_meta(mut self, finalized: Option<ComplexBlockId<Block>>, unfinalized: Vec<ComplexBlockId<Block>>) -> Self {
+		pub fn with_meta(
+			mut self,
+			finalized: Option<ComplexBlockId<Block>>,
+			unfinalized: Vec<ComplexBlockId<Block>>,
+		) -> Self {
 			self.meta.finalized = finalized;
 			self.meta.unfinalized = unfinalized;
 			self
@@ -313,7 +355,11 @@ pub mod tests {
 			self
 		}

-		pub fn with_entry(mut self, at: ComplexBlockId<Block>, entry: StorageEntry<Block, u64>) -> Self {
+		pub fn with_entry(
+			mut self,
+			at: ComplexBlockId<Block>,
+			entry: StorageEntry<Block, u64>,
+		) -> Self {
 			self.entries.insert(at.hash, entry);
 			self
 		}
@@ -332,7 +378,10 @@ pub mod tests {
 			Ok(self.meta.clone())
 		}

-		fn read_entry(&self, at: &ComplexBlockId<Block>) -> ClientResult<Option<StorageEntry<Block, u64>>> {
+		fn read_entry(
+			&self,
+			at: &ComplexBlockId<Block>,
+		) -> ClientResult<Option<StorageEntry<Block, u64>>> {
 			Ok(self.entries.get(&at.hash).cloned())
 		}
 	}
@@ -366,7 +415,11 @@ pub mod tests {
 	}

 	impl StorageTransaction<Block, u64> for DummyTransaction {
-		fn insert_storage_entry(&mut self, at: &ComplexBlockId<Block>, _entry: &StorageEntry<Block, u64>) {
+		fn insert_storage_entry(
+			&mut self,
+			at: &ComplexBlockId<Block>,
+			_entry: &StorageEntry<Block, u64>,
+		) {
 			self.inserted_entries.insert(at.hash);
 		}

@@ -380,7 +433,9 @@ pub mod tests {
 			unfinalized: &[Fork<Block, u64>],
 			operation: &CommitOperation<Block, u64>,
 		) {
-			self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap());
+			self.updated_meta = Some(
+				meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(),
+			);
 		}
 	}
 }
diff --git a/substrate/client/db/src/cache/mod.rs b/substrate/client/db/src/cache/mod.rs
index 005d25b90f933491a7dde3a62ae7041a8c155a13..5502896aced2c923de7310e93a8804aa16e6a4c1 100644
--- a/substrate/client/db/src/cache/mod.rs
+++ b/substrate/client/db/src/cache/mod.rs
@@ -18,17 +18,27 @@
 //! DB-backed cache of blockchain data.
-use std::{sync::Arc, collections::{HashMap, hash_map::Entry}};
 use parking_lot::RwLock;
-
-use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache};
-use sp_blockchain::{Result as ClientResult, HeaderMetadataCache};
+use std::{
+	collections::{hash_map::Entry, HashMap},
+	sync::Arc,
+};
+
+use crate::{
+	utils::{self, COLUMN_META},
+	DbHash,
+};
+use codec::{Decode, Encode};
+use sc_client_api::blockchain::{
+	well_known_cache_keys::{self, Id as CacheKeyId},
+	Cache as BlockchainCache,
+};
+use sp_blockchain::{HeaderMetadataCache, Result as ClientResult};
 use sp_database::{Database, Transaction};
-use codec::{Encode, Decode};
-use sp_runtime::generic::BlockId;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero};
-use crate::utils::{self, COLUMN_META};
-use crate::DbHash;
+use sp_runtime::{
+	generic::BlockId,
+	traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero},
+};

 use self::list_cache::{ListCache, PruningStrategy};

@@ -118,7 +128,10 @@ impl<Block: BlockT> DbCache<Block> {
 	}

 	/// Begin cache transaction.
-	pub fn transaction<'a>(&'a mut self, tx: &'a mut Transaction<DbHash>) -> DbCacheTransaction<'a, Block> {
+	pub fn transaction<'a>(
+		&'a mut self,
+		tx: &'a mut Transaction<DbHash>,
+	) -> DbCacheTransaction<'a, Block> {
 		DbCacheTransaction {
 			cache: self,
 			tx,
@@ -164,7 +177,7 @@ impl<Block: BlockT> DbCache<Block> {
 			self.key_lookup_column,
 			self.header_column,
 			self.cache_column,
-			&self.best_finalized_block
+			&self.best_finalized_block,
 		)
 	}
 }

@@ -184,19 +197,16 @@ fn get_cache_helper<'a, Block: BlockT>(
 		Entry::Occupied(entry) => Ok(entry.into_mut()),
 		Entry::Vacant(entry) => {
 			let cache = ListCache::new(
-				self::list_storage::DbStorage::new(name.to_vec(), db.clone(),
-					self::list_storage::DbColumns {
-						meta: COLUMN_META,
-						key_lookup,
-						header,
-						cache,
-					},
+				self::list_storage::DbStorage::new(
+					name.to_vec(),
+					db.clone(),
+					self::list_storage::DbColumns { meta: COLUMN_META, key_lookup, header, cache },
 				),
 				cache_pruning_strategy(name),
 				best_finalized_block.clone(),
 			)?;
 			Ok(entry.insert(cache))
-		}
+		},
 	}
}

@@ -210,10 +220,7 @@ pub struct DbCacheTransactionOps<Block: BlockT> {
 impl<Block: BlockT> DbCacheTransactionOps<Block> {
 	/// Empty transaction ops.
 	pub fn empty() -> DbCacheTransactionOps<Block> {
-		DbCacheTransactionOps {
-			cache_at_ops: HashMap::new(),
-			best_finalized_block: None,
-		}
+		DbCacheTransactionOps { cache_at_ops: HashMap::new(), best_finalized_block: None }
 	}
 }

@@ -244,19 +251,21 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 	) -> ClientResult<Self> {
 		// prepare list of caches that are not update
 		// (we might still need to do some cache maintenance in this case)
-		let missed_caches = self.cache.cache_at.keys()
+		let missed_caches = self
+			.cache
+			.cache_at
+			.keys()
 			.filter(|cache| !data_at.contains_key(*cache))
 			.cloned()
 			.collect::<Vec<_>>();

-		let mut insert_op = |name: CacheKeyId, value: Option<Vec<u8>>| -> Result<(), sp_blockchain::Error> {
+		let mut insert_op = |name: CacheKeyId,
+		                     value: Option<Vec<u8>>|
+		 -> Result<(), sp_blockchain::Error> {
 			let cache = self.cache.get_cache(name)?;
 			let cache_ops = self.cache_at_ops.entry(name).or_default();
 			cache.on_block_insert(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx,
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				parent.clone(),
 				block.clone(),
 				value,
@@ -271,8 +280,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 		missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?;

 		match entry_type {
-			EntryType::Final | EntryType::Genesis =>
-				self.best_finalized_block = Some(block),
+			EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block),
 			EntryType::NonFinal => (),
 		}

@@ -288,10 +296,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 		for (name, cache) in self.cache.cache_at.iter() {
 			let cache_ops = self.cache_at_ops.entry(*name).or_default();
 			cache.on_block_finalize(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				parent.clone(),
 				block.clone(),
 				cache_ops,
@@ -304,17 +309,11 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
 	}

 	/// When block is reverted.
-	pub fn on_block_revert(
-		mut self,
-		reverted_block: &ComplexBlockId<Block>,
-	) -> ClientResult<Self> {
+	pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId<Block>) -> ClientResult<Self> {
 		for (name, cache) in self.cache.cache_at.iter() {
 			let cache_ops = self.cache_at_ops.entry(*name).or_default();
 			cache.on_block_revert(
-				&mut self::list_storage::DbStorageTransaction::new(
-					cache.storage(),
-					&mut self.tx
-				),
+				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
 				reverted_block,
 				cache_ops,
 			)?;
@@ -352,7 +351,9 @@ impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
 		&self,
 		key: &CacheKeyId,
 		at: &BlockId<Block>,
-	) -> ClientResult<Option<((NumberFor<Block>, Block::Hash), Option<(NumberFor<Block>, Block::Hash)>, Vec<u8>)>> {
+	) -> ClientResult<
+		Option<((NumberFor<Block>, Block::Hash), Option<(NumberFor<Block>, Block::Hash)>, Vec<u8>)>,
+	> {
 		let mut cache = self.0.write();
 		let header_metadata_cache = cache.header_metadata_cache.clone();
 		let cache = cache.get_cache(*key)?;
 		let storage = cache.storage();
 		let db = storage.db();
 		let columns = storage.columns();
 		let at = match *at {
-			BlockId::Hash(hash) => {
-				match header_metadata_cache.header_metadata(hash) {
-					Some(metadata) => ComplexBlockId::new(hash, metadata.number),
-					None => {
-						let header = utils::require_header::<Block>(
-							&**db,
-							columns.key_lookup,
-							columns.header,
-							BlockId::Hash(hash.clone()))?;
-						ComplexBlockId::new(hash, *header.number())
-					}
-				}
+			BlockId::Hash(hash) => match header_metadata_cache.header_metadata(hash) {
+				Some(metadata) => ComplexBlockId::new(hash, metadata.number),
+				None => {
+					let header = utils::require_header::<Block>(
+						&**db,
+						columns.key_lookup,
+						columns.header,
+						BlockId::Hash(hash.clone()),
+					)?;
+					ComplexBlockId::new(hash, *header.number())
+				},
 			},
 			BlockId::Number(number) => {
 				let hash = utils::require_header::<Block>(
 					&**db,
 					columns.key_lookup,
 					columns.header,
-					BlockId::Number(number.clone()))?.hash();
+					BlockId::Number(number.clone()),
+				)?
+				.hash();
 				ComplexBlockId::new(hash, number)
 			},
 		};

-		cache.value_at_block(&at)
-			.map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)|
+		cache.value_at_block(&at).map(|block_and_value| {
+			block_and_value.map(|(begin_block, end_block, value)| {
 				(
 					(begin_block.number, begin_block.hash),
 					end_block.map(|end_block| (end_block.number, end_block.hash)),
 					value,
-				)))
+				)
+			})
+		})
 	}
 }
diff --git a/substrate/client/db/src/changes_tries_storage.rs b/substrate/client/db/src/changes_tries_storage.rs
index 3863099a09f96fdd1c286caad2c5672a87791b1b..6b948a2d2c5c3161976ba08ed6eadc99901bc92a 100644
--- a/substrate/client/db/src/changes_tries_storage.rs
+++ b/substrate/client/db/src/changes_tries_storage.rs
@@ -18,33 +18,43 @@
 //! DB-backed changes tries storage.
-use std::collections::{HashMap, HashSet};
-use std::sync::Arc;
-use hash_db::Prefix;
+use crate::{
+	cache::{
+		ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType,
+	},
+	utils::{self, meta_keys, Meta},
+	Database, DbHash,
+};
 use codec::{Decode, Encode};
+use hash_db::Prefix;
 use parking_lot::RwLock;
-use sp_blockchain::{Error as ClientError, Result as ClientResult};
-use sp_trie::MemoryDB;
 use sc_client_api::backend::PrunableStateChangesTrieStorage;
-use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache};
-use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash};
-use sp_core::storage::PrefixedStorageKey;
+use sp_blockchain::{
+	well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderMetadataCache,
+	Result as ClientResult,
+};
+use sp_core::{
+	convert_hash, storage::PrefixedStorageKey, ChangesTrieConfiguration,
+	ChangesTrieConfigurationRange,
+};
 use sp_database::Transaction;
-use sp_runtime::traits::{
-	Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub,
+use sp_runtime::{
+	generic::{BlockId, ChangesTrieSignal, DigestItem},
+	traits::{Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero},
 };
-use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal};
 use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction};
-use crate::{Database, DbHash};
-use crate::utils::{self, Meta, meta_keys};
-use crate::cache::{
-	DbCacheSync, DbCache, DbCacheTransactionOps,
-	ComplexBlockId, EntryType as CacheEntryType,
+use sp_trie::MemoryDB;
+use std::{
+	collections::{HashMap, HashSet},
+	sync::Arc,
 };

 /// Extract new changes trie configuration (if available) from the header.
-pub fn extract_new_configuration<Header: HeaderT>(header: &Header) -> Option<&Option<ChangesTrieConfiguration>> {
-	header.digest()
+pub fn extract_new_configuration<Header: HeaderT>(
+	header: &Header,
+) -> Option<&Option<ChangesTrieConfiguration>> {
+	header
+		.digest()
 		.log(DigestItem::as_changes_trie_signal)
 		.and_then(ChangesTrieSignal::as_new_configuration)
 }
@@ -68,10 +78,7 @@ impl<Block: BlockT> DbChangesTrieStorageTransaction<Block> {

 impl<Block: BlockT> From<DbCacheTransactionOps<Block>> for DbChangesTrieStorageTransaction<Block> {
 	fn from(cache_ops: DbCacheTransactionOps<Block>) -> Self {
-		DbChangesTrieStorageTransaction {
-			cache_ops,
-			new_config: None,
-		}
+		DbChangesTrieStorageTransaction { cache_ops, new_config: None }
 	}
 }

@@ -173,21 +180,25 @@ impl<Block: BlockT> DbChangesTrieStorage<Block> {
 		let new_configuration = match new_configuration {
 			Some(new_configuration) => new_configuration,
 			None if !finalized => return Ok(DbCacheTransactionOps::empty().into()),
-			None => return self.finalize(
-				tx,
-				parent_block.hash,
-				block.hash,
-				block.number,
-				Some(new_header),
-				cache_tx,
-			),
+			None =>
+				return self.finalize(
+					tx,
+					parent_block.hash,
+					block.hash,
+					block.number,
+					Some(new_header),
+					cache_tx,
+				),
 		};

 		// update configuration cache
 		let mut cache_at = HashMap::new();
 		cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode());
 		Ok(DbChangesTrieStorageTransaction::from(match cache_tx {
-			Some(cache_tx) => self.cache.0.write()
+			Some(cache_tx) => self
+				.cache
+				.0
+				.write()
 				.transaction_with_ops(tx, cache_tx.cache_ops)
 				.on_block_insert(
 					parent_block,
@@ -196,7 +207,10 @@
 					if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
 				)?
 				.into_ops(),
-			None => self.cache.0.write()
+			None => self
+				.cache
+				.0
+				.write()
 				.transaction(tx)
 				.on_block_insert(
 					parent_block,
@@ -205,7 +219,8 @@
 					if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
 				)?
 				.into_ops(),
-		}).with_new_config(Some(new_configuration)))
+		})
+		.with_new_config(Some(new_configuration)))
 	}

 	/// Called when block is finalized.
@@ -226,7 +241,7 @@
 		if cache_tx.is_some() {
 			if let Some(new_header) = new_header {
 				if new_header.hash() == block_hash {
-					return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed"));
+					return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed"))
 				}
 			}
 		}
@@ -237,22 +252,21 @@
 		let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num);
 		Ok(match cache_tx {
 			Some(cache_tx) => DbChangesTrieStorageTransaction::from(
-				self.cache.0.write()
+				self.cache
+					.0
+					.write()
 					.transaction_with_ops(tx, cache_tx.cache_ops)
-					.on_block_finalize(
-						parent_block,
-						block,
-					)?
-					.into_ops()
-			).with_new_config(cache_tx.new_config),
+					.on_block_finalize(parent_block, block)?
+					.into_ops(),
+			)
+			.with_new_config(cache_tx.new_config),
 			None => DbChangesTrieStorageTransaction::from(
-				self.cache.0.write()
+				self.cache
+					.0
+					.write()
 					.transaction(tx)
-					.on_block_finalize(
-						parent_block,
-						block,
-					)?
-					.into_ops()
+					.on_block_finalize(parent_block, block)?
+					.into_ops(),
 			),
 		})
 	}
@@ -263,23 +277,24 @@
 		tx: &mut Transaction<DbHash>,
 		block: &ComplexBlockId<Block>,
 	) -> ClientResult<DbChangesTrieStorageTransaction<Block>> {
-		Ok(self.cache.0.write().transaction(tx)
-			.on_block_revert(block)?
-			.into_ops()
-			.into())
+		Ok(self.cache.0.write().transaction(tx).on_block_revert(block)?.into_ops().into())
 	}

 	/// When transaction has been committed.
 	pub fn post_commit(&self, tx: Option<DbChangesTrieStorageTransaction<Block>>) {
 		if let Some(tx) = tx {
-			self.cache.0.write().commit(tx.cache_ops)
-				.expect("only fails if cache with given name isn't loaded yet;\
-					cache is already loaded because there is tx; qed");
+			self.cache.0.write().commit(tx.cache_ops).expect(
+				"only fails if cache with given name isn't loaded yet;\
+					cache is already loaded because there is tx; qed",
+			);
 		}
 	}

 	/// Commit changes into changes trie build cache.
-	pub fn commit_build_cache(&self, cache_update: ChangesTrieCacheAction<Block::Hash, NumberFor<Block>>) {
+	pub fn commit_build_cache(
+		&self,
+		cache_update: ChangesTrieCacheAction<Block::Hash, NumberFor<Block>>,
+	) {
 		self.build_cache.write().perform(cache_update);
 	}

@@ -307,7 +322,7 @@
 			// 2) or we are (or were) in period where changes tries are disabled
 			if let Some((begin, end)) = tries_meta.oldest_digest_range {
 				if block_num <= end || block_num - end <= min_blocks_to_keep.into() {
-					break;
+					break
 				}

 				tries_meta.oldest_pruned_digest_range_end = end;
@@ -333,7 +348,8 @@
 					self.key_lookup_column,
 					self.header_column,
 					BlockId::Number(next_digest_range_start),
-				)?.hash(),
+				)?
+					.hash(),
 			};

 			let config_for_new_block = new_header
 				.map(|header| *header.number() == next_digest_range_start)
 				.unwrap_or(false);
 			let next_config = match cache_tx {
 				Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => {
-					let config = cache_tx
-						.new_config
-						.clone()
-						.expect("guarded by is_some(); qed");
+					let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed");
 					ChangesTrieConfigurationRange {
 						zero: (block_num, block_hash),
 						end: None,
 						config,
 					}
 				},
-				_ if config_for_new_block => {
-					self.configuration_at(&BlockId::Hash(*new_header.expect(
-						"config_for_new_block is only true when new_header is passed; qed"
-					).parent_hash()))?
-				},
+				_ if config_for_new_block => self.configuration_at(&BlockId::Hash(
+					*new_header
+						.expect("config_for_new_block is only true when new_header is passed; qed")
+						.parent_hash(),
+				))?,
 				_ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?,
 			};
 			if let Some(config) = next_config.config {
@@ -370,11 +383,11 @@
 			}

 			tries_meta.oldest_digest_range = Some(oldest_digest_range);
-			continue;
+			continue
 		}

 		tries_meta.oldest_digest_range = None;
-		break;
+		break
 	}

 	write_tries_meta(tx, self.meta_column, &*tries_meta);
@@ -383,17 +396,23 @@ }

 impl<Block: BlockT> PrunableStateChangesTrieStorage<Block> for DbChangesTrieStorage<Block> {
-	fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage<HashFor<Block>, NumberFor<Block>> {
+	fn storage(
+		&self,
+	) -> &dyn sp_state_machine::ChangesTrieStorage<HashFor<Block>, NumberFor<Block>> {
 		self
 	}

-	fn configuration_at(&self, at: &BlockId<Block>) -> ClientResult<
-		ChangesTrieConfigurationRange<NumberFor<Block>, Block::Hash>
-	> {
+	fn configuration_at(
+		&self,
+		at: &BlockId<Block>,
+	) -> ClientResult<ChangesTrieConfigurationRange<NumberFor<Block>, Block::Hash>> {
 		self.cache
 			.get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)?
-			.and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok()
-				.map(|config| ChangesTrieConfigurationRange { zero, end, config }))
+			.and_then(|(zero, end, encoded)| {
+				Decode::decode(&mut &encoded[..])
+					.ok()
+					.map(|config| ChangesTrieConfigurationRange { zero, end, config })
+			})
 			.ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig)
 	}

@@ -409,14 +428,21 @@ impl<Block: BlockT> sp_state_machine::ChangesTrieRootsStorage<HashFor<Block>, Nu
 		&self,
 		hash: Block::Hash,
 	) -> Result<sp_state_machine::ChangesTrieAnchorBlockId<Block::Hash, NumberFor<Block>>, String> {
-		utils::read_header::<Block>(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash))
-			.map_err(|e| e.to_string())
-			.and_then(|maybe_header| maybe_header.map(|header|
-				sp_state_machine::ChangesTrieAnchorBlockId {
+		utils::read_header::<Block>(
+			&*self.db,
+			self.key_lookup_column,
+			self.header_column,
+			BlockId::Hash(hash),
+		)
+		.map_err(|e| e.to_string())
+		.and_then(|maybe_header| {
+			maybe_header
+				.map(|header| sp_state_machine::ChangesTrieAnchorBlockId {
 					hash,
 					number: *header.number(),
-				}
-			).ok_or_else(|| format!("Unknown header: {}", hash)))
+				})
+				.ok_or_else(|| format!("Unknown header: {}", hash))
+		})
 	}

 	fn root(
 		&self,
@@ -426,7 +452,10 @@
 	) -> Result<Option<Block::Hash>, String> {
 		// check API requirement: we can't get NEXT block(s) based on anchor
 		if block > anchor.number {
-			return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number));
+			return Err(format!(
+				"Can't get changes trie root at {} using anchor at {}",
+				block, anchor.number
+			))
 		}

 		// we need to get hash of the block to resolve changes trie root
@@ -438,8 +467,12 @@
 			let mut current_num = anchor.number;
 			let mut current_hash: Block::Hash = convert_hash(&anchor.hash);
 			let maybe_anchor_header: Block::Header = utils::require_header::<Block>(
-				&*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num)
-			).map_err(|e| e.to_string())?;
+				&*self.db,
+				self.key_lookup_column,
+				self.header_column,
+				BlockId::Number(current_num),
+			)
+			.map_err(|e| e.to_string())?;
 			if maybe_anchor_header.hash() == current_hash {
 				// if anchor is canonicalized, then the block is also canonicalized
 				BlockId::Number(block)
@@ -449,8 +482,12 @@
 				// back from the anchor to the block with given number
 				while current_num != block {
 					let current_header: Block::Header = utils::require_header::<Block>(
-						&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash)
-					).map_err(|e| e.to_string())?;
+						&*self.db,
+						self.key_lookup_column,
+						self.header_column,
+						BlockId::Hash(current_hash),
+					)
+					.map_err(|e| e.to_string())?;

 					current_hash = *current_header.parent_hash();
 					current_num = current_num - One::one();
@@ -460,18 +497,16 @@
 			}
 		};

-		Ok(
-			utils::require_header::<Block>(
-				&*self.db,
-				self.key_lookup_column,
-				self.header_column,
-				block_id,
-			)
-			.map_err(|e| e.to_string())?
-			.digest()
-			.log(DigestItem::as_changes_trie_root)
-			.cloned()
+		Ok(utils::require_header::<Block>(
+			&*self.db,
+			self.key_lookup_column,
+			self.header_column,
+			block_id,
 		)
+		.map_err(|e| e.to_string())?
+		.digest()
+		.log(DigestItem::as_changes_trie_root)
+		.cloned())
 	}
 }

@@ -480,7 +515,9 @@ impl<Block: BlockT> sp_state_machine::ChangesTrieStorage<HashFor<Block>, NumberFor
-	fn as_roots_storage(&self) -> &dyn sp_state_machine::ChangesTrieRootsStorage<HashFor<Block>, NumberFor<Block>> {
+	fn as_roots_storage(
+		&self,
+	) -> &dyn sp_state_machine::ChangesTrieRootsStorage<HashFor<Block>, NumberFor<Block>> {
 		self
 	}

@@ -503,8 +540,9 @@ fn read_tries_meta<Block: BlockT>(
 	meta_column: u32,
 ) -> ClientResult<ChangesTriesMeta<Block>> {
 	match db.get(meta_column, meta_keys::CHANGES_TRIES_META) {
-		Some(h) => Decode::decode(&mut &h[..])
-			.map_err(|err| ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))),
+		Some(h) => Decode::decode(&mut &h[..]).map_err(|err| {
+			ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))
+		}),
 		None => Ok(ChangesTriesMeta {
 			oldest_digest_range: None,
 			oldest_pruned_digest_range_end: Zero::zero(),
@@ -523,18 +561,23 @@

 #[cfg(test)]
 mod tests {
+	use super::*;
+	use crate::{
+		tests::{insert_header, prepare_changes, Block},
+		Backend,
+	};
 	use hash_db::EMPTY_PREFIX;
 	use sc_client_api::backend::{
-		Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage,
+		Backend as ClientBackend, BlockImportOperation, NewBlockState,
+		PrunableStateChangesTrieStorage,
 	};
 	use sp_blockchain::HeaderBackend as BlockchainHeaderBackend;
 	use sp_core::H256;
-	use sp_runtime::testing::{Digest, Header};
-	use sp_runtime::traits::{Hash, BlakeTwo256};
+	use sp_runtime::{
+		testing::{Digest, Header},
+		traits::{BlakeTwo256, Hash},
+	};
 	use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage};
-	use crate::Backend;
-	use crate::tests::{Block, insert_header, prepare_changes};
-	use super::*;

 	fn changes(number: u64) -> Option<Vec<(Vec<u8>, Vec<u8>)>> {
 		Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())])
@@ -554,7 +597,9 @@ mod tests {
 			digest.push(DigestItem::ChangesTrieRoot(root));
 			changes_trie_update = update;
 		}
-		digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration)));
+		digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(
+			new_configuration,
+		)));

 		let header = Header {
 			number,
@@ -573,7 +618,8 @@ mod tests {
 		let mut op = backend.begin_operation().unwrap();
 		backend.begin_state_operation(&mut op, block_id).unwrap();
 		op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
-		op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap();
+		op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear))
+			.unwrap();
 		backend.commit_operation(op).unwrap();

 		header_hash
@@ -584,11 +630,13 @@ mod tests {
 		let backend = Backend::<Block>::new_test(1000, 100);
 		backend.changes_tries_storage.meta.write().finalized_number = 1000;

-		let check_changes = |backend: &Backend<Block>, block: u64, changes: Vec<(Vec<u8>, Vec<u8>)>| {
+		let check_changes = |backend: &Backend<Block>,
+		                     block: u64,
+		                     changes: Vec<(Vec<u8>, Vec<u8>)>| {
 			let (changes_root, mut changes_trie_update) = prepare_changes(changes);
 			let anchor = sp_state_machine::ChangesTrieAnchorBlockId {
 				hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(),
-				number: block
+				number: block,
 			};
 			assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));

@@ -605,7 +653,13 @@ mod tests {
 		];
 		let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())];

-		let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default());
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Some(changes0.clone()),
+			Default::default(),
+		);
 		let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default());
 		let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default());

@@ -622,19 +676,29 @@ mod tests {
 		let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())];
 		let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())];
 		let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())];
-		let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default());
+		let block0 = insert_header(
+			&backend,
+			0,
+			Default::default(),
+			Some(changes0.clone()),
+			Default::default(),
+		);
 		let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default());
 		let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default());

 		let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())];
 		let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())];
-		let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default());
-		let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default());
+		let block2_1_0 =
+			insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default());
+		let block2_1_1 =
+			insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default());

 		let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())];
 		let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())];
-		let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default());
-		let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default());
+		let block2_2_0 =
+			insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default());
+		let block2_2_1 =
+			insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default());

 		// finalize block1
 		backend.changes_tries_storage.meta.write().finalized_number = 1;
@@ -680,7 +744,12 @@ mod tests {
 			if number == 0 {
 				Default::default()
 			} else {
-				backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash()
+				backend
+					.blockchain()
+					.header(BlockId::Number(number - 1))
+					.unwrap()
+					.unwrap()
+					.hash()
 			}
 		};

@@ -698,12 +767,14 @@ mod tests {
 			let trie_root = backend
 				.blockchain()
 				.header(BlockId::Number(number))
-				.unwrap().unwrap()
+				.unwrap()
+				.unwrap()
 				.digest()
 				.log(DigestItem::as_changes_trie_root)
 				.cloned();
 			match trie_root {
-				Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(),
+				Some(trie_root) =>
+					backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(),
 				None => true,
 			}
 		};

@@ -711,14 +782,10 @@ mod tests {
 		let finalize_block = |number| {
 			let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap();
 			let mut tx = Transaction::new();
-			let cache_ops = backend.changes_tries_storage.finalize(
-				&mut tx,
-				*header.parent_hash(),
-				header.hash(),
-				number,
-				None,
-				None,
-			).unwrap();
+			let cache_ops = backend
+				.changes_tries_storage
+				.finalize(&mut tx, *header.parent_hash(), header.hash(), number, None, None)
+				.unwrap();
 			backend.storage.db.commit(tx).unwrap();
 			backend.changes_tries_storage.post_commit(Some(cache_ops));
 		};

@@ -737,11 +804,23 @@ mod tests {
 		(0..6).for_each(|number| insert_regular_header(false, number));
 		insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6);
 		(7..17).for_each(|number| insert_regular_header(true, number));
number)); - insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); + insert_header_with_configuration_change( + &backend, + 17, + parent_hash(17), + changes(17), + config_at_17, + ); (18..21).for_each(|number| insert_regular_header(false, number)); insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); + insert_header_with_configuration_change( + &backend, + 32, + parent_hash(32), + changes(32), + config_at_32, + ); (33..50).for_each(|number| insert_regular_header(true, number)); // when only genesis is finalized, nothing is pruned @@ -826,29 +905,24 @@ mod tests { let backend = Backend::::new_test(1000, 100); // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - }); - let config_at_3 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); + let config_at_1 = Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 }); + let config_at_3 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); + let config_at_7 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); // insert some blocks let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); + let block3 = + insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); + let block5 = + insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); + let block7 = + insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); // test configuration cache let storage = &backend.changes_tries_storage; @@ -887,17 +961,48 @@ mod tests { let mut backend = Backend::::new_test(10, 10); backend.changes_tries_storage.min_blocks_to_keep = Some(8); - let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); + let configs = + (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); // insert unfinalized headers - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), configs[2].clone()); + let block0 = insert_header_with_configuration_change( + &backend, + 0, + Default::default(), + None, + configs[0].clone(), + 
); + let block1 = insert_header_with_configuration_change( + &backend, + 1, + block0, + changes(1), + configs[1].clone(), + ); + let block2 = insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(2), + configs[2].clone(), + ); let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); - let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); + let block2_1 = insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(8), + side_config2_1.clone(), + ); + let _ = insert_header_with_configuration_change( + &backend, + 3, + block2_1, + changes(9), + side_config2_2.clone(), + ); // insert finalized header => 4 headers are finalized at once let header3 = Header { @@ -905,9 +1010,9 @@ mod tests { parent_hash: block2, state_root: Default::default(), digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), - ], + logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + configs[3].clone(), + ))], }, extrinsics_root: Default::default(), }; @@ -920,9 +1025,27 @@ mod tests { backend.commit_operation(op).unwrap(); // insert more unfinalized headers - let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); - let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), configs[6].clone()); + let block4 = insert_header_with_configuration_change( + &backend, + 4, + block3, + changes(4), + configs[4].clone(), + ); + let block5 = insert_header_with_configuration_change( + &backend, + 5, + block4, + changes(5), + configs[5].clone(), + ); + let block6 = insert_header_with_configuration_change( + &backend, + 6, + block5, + changes(6), + configs[6].clone(), + ); // insert finalized header => 4 headers are finalized at once let header7 = Header { @@ -930,9 +1053,9 @@ mod tests { parent_hash: block6, state_root: Default::default(), digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), - ], + logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( + configs[7].clone(), + ))], }, extrinsics_root: Default::default(), }; @@ -950,23 +1073,33 @@ mod tests { let backend = Backend::::new_test(10, 10); let config0 = Some(ChangesTrieConfiguration::new(2, 5)); - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); + let block0 = + insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); let just1 = Some((*b"TEST", vec![42])); backend.finalize_block(BlockId::Number(1), just1).unwrap(); let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); + let block2 = + insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); let config2_1 = 
Some(ChangesTrieConfiguration::new(2, 8)); - let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); + let _ = + insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); + let block2_2 = + insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); + let _ = + insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); // before truncate there are 2 unfinalized forks - block2_1+block2_3 assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -979,7 +1112,11 @@ mod tests { // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 backend.revert(1, false).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -993,7 +1130,11 @@ mod tests { // the 1st one points to the block #3 because it isn't truncated backend.revert(1, false).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.0.write() + backend + .changes_tries_storage + .cache + .0 + .write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -1005,15 +1146,17 @@ mod tests { // after truncating block2 - there are no unfinalized forks backend.revert(1, false).unwrap(); - assert!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::<Vec<_>>() - .is_empty(), - ); + assert!(backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::<Vec<_>>() + .is_empty(),); } } diff --git a/substrate/client/db/src/children.rs b/substrate/client/db/src/children.rs index 62352e6d0614aced3ca0fccee9c33b47a258befe..c11e4204997d187ede2c38b5f5b6a740b2aa6db7 100644 --- a/substrate/client/db/src/children.rs +++ b/substrate/client/db/src/children.rs @@ -18,17 +18,22 @@ //! Functionality for reading and storing children hashes from db. -use codec::{Encode, Decode}; +use crate::DbHash; +use codec::{Decode, Encode}; use sp_blockchain; -use std::hash::Hash; use sp_database::{Database, Transaction}; -use crate::DbHash; +use std::hash::Hash; /// Returns the hashes of the children blocks of the block with `parent_hash`. pub fn read_children< K: Eq + Hash + Clone + Encode + Decode, V: Eq + Hash + Clone + Encode + Decode, ->(db: &dyn Database<DbHash>, column: u32, prefix: &[u8], parent_hash: K) -> sp_blockchain::Result<Vec<V>> { +>( + db: &dyn Database<DbHash>, + column: u32, + prefix: &[u8], + parent_hash: K, +) -> sp_blockchain::Result<Vec<V>> { let mut buf = prefix.to_vec(); parent_hash.using_encoded(|s| buf.extend(s)); @@ -65,9 +70,7 @@ pub fn write_children< } /// Prepare transaction to remove the children of `parent_hash`. 
-pub fn remove_children< - K: Eq + Hash + Clone + Encode + Decode, ->( +pub fn remove_children<K: Eq + Hash + Clone + Encode + Decode>( tx: &mut Transaction<DbHash>, column: u32, prefix: &[u8], @@ -78,7 +81,6 @@ pub fn remove_children< tx.remove(column, &key); } - #[cfg(test)] mod tests { use super::*; diff --git a/substrate/client/db/src/lib.rs b/substrate/client/db/src/lib.rs index 505c7b9d49ea679067ae0b792a20b21313bfbece..3369b5fad055c71fa4ca884d4b9375279d862f1d 100644 --- a/substrate/client/db/src/lib.rs +++ b/substrate/client/db/src/lib.rs @@ -34,63 +34,72 @@ pub mod offchain; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub mod bench; -mod children; mod cache; mod changes_tries_storage; +mod children; +#[cfg(feature = "with-parity-db")] +mod parity_db; +mod stats; mod storage_cache; #[cfg(any(feature = "with-kvdb-rocksdb", test))] mod upgrade; mod utils; -mod stats; -#[cfg(feature = "with-parity-db")] -mod parity_db; -use std::sync::Arc; -use std::path::{Path, PathBuf}; -use std::io; -use std::collections::{HashMap, HashSet}; -use parking_lot::{Mutex, RwLock}; use linked_hash_map::LinkedHashMap; -use log::{trace, debug, warn}; +use log::{debug, trace, warn}; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{HashMap, HashSet}, + io, + path::{Path, PathBuf}, + sync::Arc, +}; +use crate::{ + changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}, + stats::StateUsageStats, + storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, + utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, +}; +use codec::{Decode, Encode}; +use hash_db::Prefix; use sc_client_api::{ - UsageInfo, MemoryInfo, IoInfo, MemorySize, - backend::{NewBlockState, PrunableStateChangesTrieStorage, ProvideChtRoots}, - leaves::{LeafSet, FinalizationDisplaced}, cht, + backend::{NewBlockState, ProvideChtRoots, PrunableStateChangesTrieStorage}, + cht, + leaves::{FinalizationDisplaced, LeafSet}, utils::is_descendent_of, + IoInfo, MemoryInfo, MemorySize, UsageInfo, }; +use sc_state_db::StateDb; +use sp_arithmetic::traits::Saturating; use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - well_known_cache_keys, Backend as _, HeaderBackend, + well_known_cache_keys, Backend as _, CachedHeaderMetadata, Error as ClientError, HeaderBackend, + HeaderMetadata, HeaderMetadataCache, Result as ClientResult, +}; +use sp_core::{ + offchain::OffchainOverlayedChange, + storage::{well_known_keys, ChildInfo}, + ChangesTrieConfiguration, }; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; -use sp_core::ChangesTrieConfiguration; -use sp_core::offchain::OffchainOverlayedChange; -use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_arithmetic::traits::Saturating; -use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Justifications, Storage}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, - Hash, +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{ + Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, + Zero, + }, + Justification, Justifications, Storage, }; use sp_state_machine::{ - DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, OffchainChangesCollection, - backend::Backend as StateBackend, StateMachineStats, IndexOperation, + backend::Backend as StateBackend, ChangesTrieCacheAction, 
ChangesTrieTransaction, + ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, + StorageCollection, UsageInfo as StateUsageInfo, }; -use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; -use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; -use sc_state_db::StateDb; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; -use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; -use crate::stats::StateUsageStats; +use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. -pub use sp_database::Database; pub use sc_state_db::PruningMode; +pub use sp_database::Database; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub use bench::BenchmarkingState; @@ -102,9 +111,8 @@ const CACHE_HEADERS: usize = 8; const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +pub type DbState = + sp_state_machine::TrieBackend>>, HashFor>; const DB_HASH_LEN: usize = 32; /// Hash type that this backend uses for the database. @@ -131,11 +139,7 @@ pub struct RefTrackingState { impl RefTrackingState { fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { - RefTrackingState { - state, - parent_hash, - storage, - } + RefTrackingState { state, parent_hash, storage } } } @@ -154,7 +158,7 @@ impl std::fmt::Debug for RefTrackingState { } impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; + type Error = as StateBackend>>::Error; type Transaction = as StateBackend>>::Transaction; type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; @@ -214,7 +218,8 @@ impl StateBackend> for RefTrackingState { f: F, allow_missing: bool, ) -> Result { - self.state.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) } fn apply_to_keys_while bool>( @@ -237,16 +242,22 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { self.state.child_storage_root(child_info, delta) } @@ -258,17 +269,13 @@ impl StateBackend> for RefTrackingState { self.state.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.state.child_keys(child_info, prefix) } - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { + fn as_trie_backend( + &mut self, + ) -> Option<&sp_state_machine::TrieBackend>> { self.state.as_trie_backend() } @@ -432,7 +439,7 @@ pub struct BlockchainDb { impl BlockchainDb { fn new( db: Arc>, - transaction_storage: TransactionStorageMode + transaction_storage: TransactionStorageMode, ) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, 
columns::META, meta_keys::LEAF_PREFIX)?; @@ -446,10 +453,7 @@ impl BlockchainDb { }) } - fn update_meta( - &self, - update: MetaUpdate, - ) { + fn update_meta(&self, update: MetaUpdate) { let MetaUpdate { hash, number, is_best, is_finalized, with_state } = update; let mut meta = self.meta.write(); if number.is_zero() { @@ -473,10 +477,9 @@ impl BlockchainDb { // Get block changes trie root, if available. fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block) - .map(|header| header.and_then(|header| - header.digest().log(DigestItem::as_changes_trie_root) - .cloned())) + self.header(block).map(|header| { + header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) } } @@ -486,15 +489,15 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha BlockId::Hash(h) => { let mut cache = self.header_cache.lock(); if let Some(result) = cache.get_refresh(h) { - return Ok(result.clone()); + return Ok(result.clone()) } - let header = utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; + let header = + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?; cache_header(&mut cache, h.clone(), header.clone()); Ok(header) - } - BlockId::Number(_) => { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } + }, + BlockId::Number(_) => + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id), } } @@ -527,10 +530,11 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha } fn hash(&self, number: NumberFor) -> ClientResult> { - self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header { - Some(header) => Ok(Some(header.hash().clone())), - None => Ok(None), - }) + self.header(BlockId::Number(number)) + .and_then(|maybe_header| match maybe_header { + Some(header) => Ok(Some(header.hash().clone())), + None => Ok(None), + }) } } @@ -543,40 +547,51 @@ impl sc_client_api::blockchain::Backend for BlockchainDb match Decode::decode(&mut &body[..]) { Ok(body) => Ok(Some(body)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body: {}", err) - )), + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body: {}", + err + ))), }, TransactionStorageMode::StorageChain => { match Vec::::decode(&mut &body[..]) { Ok(index) => { - let extrinsics: ClientResult> = index.into_iter().map( - | ExtrinsicHeader { indexed_hash, data } | { + let extrinsics: ClientResult> = index + .into_iter() + .map(|ExtrinsicHeader { indexed_hash, data }| { let decode_result = if indexed_hash != Default::default() { match self.db.get(columns::TRANSACTION, indexed_hash.as_ref()) { Some(t) => { - let mut input = utils::join_input(data.as_ref(), t.as_ref()); + let mut input = + utils::join_input(data.as_ref(), t.as_ref()); Block::Extrinsic::decode(&mut input) }, - None => return Err(sp_blockchain::Error::Backend( - format!("Missing indexed transaction {:?}", indexed_hash)) - ) + None => + return Err(sp_blockchain::Error::Backend(format!( + "Missing indexed transaction {:?}", + indexed_hash + ))), } } else { Block::Extrinsic::decode(&mut data.as_ref()) }; - decode_result.map_err(|err| sp_blockchain::Error::Backend( - format!("Error decoding extrinsic: {}", err)) - ) - } - ).collect(); + decode_result.map_err(|err| { + sp_blockchain::Error::Backend(format!( + "Error decoding extrinsic: {}", + err + )) + }) + }) + .collect(); Ok(Some(extrinsics?)) - } - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error 
decoding body list: {}", err) - )), + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), } - } + }, } } @@ -584,10 +599,12 @@ impl sc_client_api::blockchain::Backend for BlockchainDb match Decode::decode(&mut &justifications[..]) { Ok(justifications) => Ok(Some(justifications)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding justifications: {}", err) - )), - } + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding justifications: {}", + err + ))), + }, None => Ok(None), } } @@ -631,19 +648,23 @@ impl sc_client_api::blockchain::Backend for BlockchainDb transactions.push(t), - None => return Err(sp_blockchain::Error::Backend( - format!("Missing indexed transaction {:?}", indexed_hash)) - ) + None => + return Err(sp_blockchain::Error::Backend(format!( + "Missing indexed transaction {:?}", + indexed_hash + ))), } } } Ok(Some(transactions)) - } - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body list: {}", err) - )), + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), } - } + }, } } } @@ -657,17 +678,25 @@ impl sc_client_api::blockchain::ProvideCache for Blockchai impl HeaderMetadata for BlockchainDb { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else(|| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or_else(|| ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }, Ok) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache.header_metadata(hash).map_or_else( + || { + self.header(BlockId::hash(hash))? 
+ .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or_else(|| { + ClientError::UnknownBlock(format!("header not found in db: {}", hash)) + }) + }, + Ok, + ) } fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { @@ -701,8 +730,11 @@ impl ProvideChtRoots for BlockchainDb { }); cht::compute_root::, _>( - cht::size(), cht_number, cht_range.map(|num| self.hash(num)) - ).map(Some) + cht::size(), + cht_number, + cht_range.map(|num| self.hash(num)), + ) + .map(Some) } fn changes_trie_cht_root( @@ -728,7 +760,8 @@ impl ProvideChtRoots for BlockchainDb { cht::size(), cht_number, cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - ).map(Some) + ) + .map(Some) } } @@ -759,8 +792,7 @@ impl BlockImportOperation { match value_operation { OffchainOverlayedChange::SetValue(val) => transaction.set_from_vec(columns::OFFCHAIN, &key, val), - OffchainOverlayedChange::Remove => - transaction.remove(columns::OFFCHAIN, &key), + OffchainOverlayedChange::Remove => transaction.remove(columns::OFFCHAIN, &key), } } @@ -778,18 +810,17 @@ impl BlockImportOperation { } } - fn apply_new_state( - &mut self, - storage: Storage, - ) -> ClientResult { + fn apply_new_state(&mut self, storage: Storage) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { - return Err(sp_blockchain::Error::InvalidState.into()); + return Err(sp_blockchain::Error::InvalidState.into()) } - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)|( + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - )); + ) + }); let mut changes_trie_config = None; let (root, transaction) = self.old_state.full_storage_root( @@ -799,7 +830,7 @@ impl BlockImportOperation { } (&k[..], Some(&v[..])) }), - child_delta + child_delta, ); let changes_trie_config = match changes_trie_config { @@ -812,10 +843,11 @@ impl BlockImportOperation { self.changes_trie_config_update = Some(changes_trie_config); Ok(root) } - } -impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { +impl sc_client_api::backend::BlockImportOperation + for BlockImportOperation +{ type State = SyncingCachingState, Block>; fn state(&self) -> ClientResult> { @@ -831,16 +863,13 @@ impl sc_client_api::backend::BlockImportOperation for Bloc leaf_state: NewBlockState, ) -> ClientResult<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - if let Some(changes_trie_config_update) = changes_tries_storage::extract_new_configuration(&header) { + if let Some(changes_trie_config_update) = + changes_tries_storage::extract_new_configuration(&header) + { self.changes_trie_config_update = Some(changes_trie_config_update.clone()); } - self.pending_block = Some(PendingBlock { - header, - body, - indexed_body, - justifications, - leaf_state, - }); + self.pending_block = + Some(PendingBlock { header, body, indexed_body, justifications, leaf_state }); Ok(()) } @@ -853,20 +882,13 @@ impl sc_client_api::backend::BlockImportOperation for Bloc Ok(()) } - fn reset_storage( - &mut self, - storage: Storage, - ) -> ClientResult { + fn reset_storage(&mut self, storage: Storage) -> ClientResult { let root = self.apply_new_state(storage)?; self.commit_state = true; Ok(root) } 
- fn set_genesis_state( - &mut self, - storage: Storage, - commit: bool, - ) -> ClientResult { + fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> ClientResult { let root = self.apply_new_state(storage)?; self.commit_state = commit; Ok(root) @@ -882,7 +904,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux_ops.append(&mut ops.into_iter().collect()); Ok(()) @@ -961,10 +984,7 @@ struct DbGenesisStorage { impl DbGenesisStorage { pub fn new(root: Block::Hash, storage: PrefixedMemoryDB>) -> Self { - DbGenesisStorage { - root, - storage, - } + DbGenesisStorage { root, storage } } } @@ -1012,13 +1032,13 @@ pub(crate) struct FrozenForDuration { impl FrozenForDuration { fn new(duration: std::time::Duration) -> Self { - Self { - duration, - value: Frozen { at: std::time::Instant::now(), value: None }.into(), - } + Self { duration, value: Frozen { at: std::time::Instant::now(), value: None }.into() } } - fn take_or_else(&self, f: F) -> T where F: FnOnce() -> T { + fn take_or_else(&self, f: F) -> T + where + F: FnOnce() -> T, + { let mut lock = self.value.lock(); if lock.at.elapsed() > self.duration || lock.value.is_none() { let new_value = f(); @@ -1104,7 +1124,8 @@ impl Backend { config.state_pruning.clone(), !config.source.supports_ref_counting(), &StateMetaDb(&*db), - ).map_err(map_e)?; + ) + .map_err(map_e)?; let storage_db = StorageDb { db: db.clone(), state_db, @@ -1120,11 +1141,7 @@ impl Backend { columns::HEADER, columns::CACHE, meta, - if is_archive_pruning { - None - } else { - Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) - }, + if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, )?; let backend = Backend { @@ -1148,10 +1165,13 @@ impl Backend { // Older DB versions have no last state key. Check if the state is available and set it. let info = backend.blockchain.info(); - if info.finalized_state.is_none() - && info.finalized_hash != Default::default() - && sc_client_api::Backend::have_state_at(&backend, &info.finalized_hash, info.finalized_number) - { + if info.finalized_state.is_none() && + info.finalized_hash != Default::default() && + sc_client_api::Backend::have_state_at( + &backend, + &info.finalized_hash, + info.finalized_number, + ) { backend.blockchain.update_meta(MetaUpdate { hash: info.finalized_hash, number: info.finalized_number, @@ -1183,11 +1203,7 @@ impl Backend { // cannot find tree route with empty DB. if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route( - &self.blockchain, - meta.best_hash, - route_to, - )?; + let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?; // uncanonicalize: check safety violations and ensure the numbers no longer // point to these block hashes in the key mapping. @@ -1198,15 +1214,11 @@ impl Backend { (&r.number, &r.hash) ); - return Err(::sp_blockchain::Error::NotInFinalizedChain.into()); + return Err(::sp_blockchain::Error::NotInFinalizedChain.into()) } retracted.push(r.hash.clone()); - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - r.number - )?; + utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?; } // canonicalize: set the number lookup to map to this block's hash. 
@@ -1216,7 +1228,7 @@ impl Backend { transaction, columns::KEY_LOOKUP, e.number, - e.hash + e.hash, )?; } } @@ -1238,11 +1250,15 @@ impl Backend { header: &Block::Header, last_finalized: Option, ) -> ClientResult<()> { - let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); + let last_finalized = + last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); if *header.parent_hash() != last_finalized { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", last_finalized, header.hash()), - ).into()); + return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + last_finalized, + header.hash() + )) + .into()) } Ok(()) } @@ -1279,13 +1295,7 @@ impl Backend { Justifications::from(justification).encode(), ); } - Ok(MetaUpdate { - hash: *hash, - number, - is_best: false, - is_finalized: true, - with_state, - }) + Ok(MetaUpdate { hash: *hash, number, is_best: false, is_finalized: true, with_state }) } // performs forced canonicalization with a delay after importing a non-finalized block. @@ -1294,9 +1304,7 @@ impl Backend { transaction: &mut Transaction, hash: Block::Hash, number: NumberFor, - ) - -> ClientResult<()> - { + ) -> ClientResult<()> { let number_u64 = number.saturated_into::(); if number_u64 > self.canonicalization_delay { let new_canonical = number_u64 - self.canonicalization_delay; @@ -1310,29 +1318,28 @@ impl Backend { sc_client_api::blockchain::HeaderBackend::hash( &self.blockchain, new_canonical.saturated_into(), - )?.ok_or_else(|| sp_blockchain::Error::Backend(format!( - "Can't canonicalize missing block number #{} when importing {:?} (#{})", - new_canonical, - hash, - number, - )))? + )? + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Can't canonicalize missing block number #{} when importing {:?} (#{})", + new_canonical, hash, number, + )) + })? }; if !sc_client_api::Backend::have_state_at(self, &hash, new_canonical.saturated_into()) { return Ok(()) } trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; apply_state_commit(transaction, commit); } Ok(()) } - fn try_commit_operation( - &self, - mut operation: BlockImportOperation, - ) -> ClientResult<()> { + fn try_commit_operation(&self, mut operation: BlockImportOperation) -> ClientResult<()> { let mut transaction = Transaction::new(); let mut finalization_displaced_leaves = None; @@ -1362,12 +1369,12 @@ impl Backend { } let imported = if let Some(pending_block) = operation.pending_block { - let hash = pending_block.header.hash(); let parent_hash = *pending_block.header.parent_hash(); let number = pending_block.header.number().clone(); - let existing_header = number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); + let existing_header = + number <= best_num && self.blockchain.header(BlockId::hash(hash))?.is_some(); // blocks are keyed by number + hash. 
let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; @@ -1378,12 +1385,7 @@ impl Backend { (Default::default(), Default::default()) }; - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); if let Some(body) = pending_block.body { @@ -1392,7 +1394,8 @@ impl Backend { transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); }, TransactionStorageMode::StorageChain => { - let body = apply_index_ops::(&mut transaction, body, operation.index_ops); + let body = + apply_index_ops::(&mut transaction, body, operation.index_ops); transaction.set_from_vec(columns::BODY, &lookup_key, body); }, } @@ -1408,11 +1411,19 @@ impl Backend { } } if let Some(justifications) = pending_block.justifications { - transaction.set_from_vec(columns::JUSTIFICATIONS, &lookup_key, justifications.encode()); + transaction.set_from_vec( + columns::JUSTIFICATIONS, + &lookup_key, + justifications.encode(), + ); } if number.is_zero() { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key.clone()); + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_BLOCK, + lookup_key.clone(), + ); transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); // for tests, because config is set from within the reset_storage @@ -1427,13 +1438,14 @@ impl Backend { // to bootstrap consensus. It is queried for an initial list of authorities, etc. *self.genesis_state.write() = Some(Arc::new(DbGenesisStorage::new( pending_block.header.state_root().clone(), - operation.db_updates.clone() + operation.db_updates.clone(), ))); } } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut changeset: sc_state_db::ChangeSet> = + sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; let mut bytes: u64 = 0; let mut removal: u64 = 0; @@ -1441,7 +1453,7 @@ impl Backend { for (mut key, (val, rc)) in operation.db_updates.drain() { if !self.storage.prefix_keys { // Strip prefix - key.drain(0 .. key.len() - DB_HASH_LEN); + key.drain(0..key.len() - DB_HASH_LEN); }; if rc > 0 { ops += 1; @@ -1450,7 +1462,7 @@ impl Backend { changeset.inserted.push((key, val.to_vec())); } else { changeset.inserted.push((key.clone(), val.to_vec())); - for _ in 0 .. rc - 1 { + for _ in 0..rc - 1 { changeset.inserted.push((key.clone(), Default::default())); } } @@ -1460,7 +1472,7 @@ impl Backend { if rc == -1 { changeset.deleted.push(key); } else { - for _ in 0 .. 
-rc { + for _ in 0..-rc { changeset.deleted.push(key.clone()); } } @@ -1471,27 +1483,32 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; - for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { - ops += 1; - bytes += key.len() as u64; - if let Some(v) = value.as_ref() { - bytes += v.len() as u64; - } + for (key, value) in operation + .storage_updates + .iter() + .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) + { + ops += 1; + bytes += key.len() as u64; + if let Some(v) = value.as_ref() { + bytes += v.len() as u64; + } } self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block( - &hash, - number_u64, - &pending_block.header.parent_hash(), - changeset, - ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; + let commit = self + .storage + .state_db + .insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) + .map_err(|e: sc_state_db::Error| { + sp_blockchain::Error::from_state_db(e) + })?; apply_state_commit(&mut transaction, commit); if number <= last_finalized_num { // Canonicalize in the db when re-importing existing blocks with state. - let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; apply_state_commit(&mut transaction, commit); meta_updates.push(MetaUpdate { hash, @@ -1502,7 +1519,6 @@ impl Backend { }); } - // Check if need to finalize. Genesis is always finalized instantly. let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); finalized @@ -1555,11 +1571,14 @@ impl Backend { self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? } - let displaced_leaf = { let mut leaves = self.blockchain.leaves.write(); let displaced_leaf = leaves.import(hash, number, parent_hash); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + leaves.prepare_transaction( + &mut transaction, + columns::META, + meta_keys::LEAF_PREFIX, + ); displaced_leaf }; @@ -1589,7 +1608,16 @@ impl Backend { with_state: operation.commit_state, }); - Some((pending_block.header, number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + Some(( + pending_block.header, + number, + hash, + enacted, + retracted, + displaced_leaf, + is_best, + cache, + )) } else { None } @@ -1598,14 +1626,16 @@ impl Backend { }; let cache_update = if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { + if let Some(header) = + sc_client_api::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
+ { let number = header.number(); let hash = header.hash(); let (enacted, retracted) = self.set_head_with_transaction( &mut transaction, hash.clone(), - (number.clone(), hash.clone()) + (number.clone(), hash.clone()), )?; meta_updates.push(MetaUpdate { hash, @@ -1616,7 +1646,10 @@ impl Backend { }); Some((enacted, retracted)) } else { - return Err(sp_blockchain::Error::UnknownBlock(format!("Cannot set head {:?}", set_head))) + return Err(sp_blockchain::Error::UnknownBlock(format!( + "Cannot set head {:?}", + set_head + ))) } } else { None @@ -1636,13 +1669,11 @@ impl Backend { _displaced_leaf, is_best, mut cache, - )) = imported { + )) = imported + { trace!(target: "db", "DB Commit done {:?}", hash); let header_metadata = CachedHeaderMetadata::from(&header); - self.blockchain.insert_header_metadata( - header_metadata.hash, - header_metadata, - ); + self.blockchain.insert_header_metadata(header_metadata.hash, header_metadata); cache_header(&mut self.blockchain.header_cache.lock(), hash, Some(header)); cache.sync_cache( &enacted, @@ -1693,10 +1724,15 @@ impl Backend { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); if sc_client_api::Backend::have_state_at(self, &f_hash, f_num) && - self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) + self.storage + .state_db + .best_canonical() + .map(|c| f_num.saturated_into::() > c) + .unwrap_or(true) { - let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e))?; + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e), + )?; apply_state_commit(transaction, commit); } @@ -1779,23 +1815,21 @@ impl Backend { TransactionStorageMode::BlockBody => {}, TransactionStorageMode::StorageChain => { match Vec::::decode(&mut &body[..]) { - Ok(body) => { + Ok(body) => for ExtrinsicHeader { indexed_hash, .. 
} in body { if indexed_hash != Default::default() { - transaction.release( - columns::TRANSACTION, - indexed_hash, - ); + transaction.release(columns::TRANSACTION, indexed_hash); } - } - } - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body list: {}", err) - )), + }, + Err(err) => + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body list: {}", + err + ))), } - } + }, } - } + }, None => return Ok(()), } Ok(()) @@ -1805,22 +1839,20 @@ impl Backend { let root = EmptyStorage::::new().0; // Empty trie let db_state = DbState::::new(self.storage.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - None, - ); + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), )) } } - -fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { +fn apply_state_commit( + transaction: &mut Transaction, + commit: sc_state_db::CommitSet>, +) { for (key, val) in commit.data.inserted.into_iter() { transaction.set_from_vec(columns::STATE, &key[..], val); } @@ -1847,10 +1879,10 @@ fn apply_index_ops( match op { IndexOperation::Insert { extrinsic, hash, size } => { index_map.insert(extrinsic, (hash, size)); - } + }, IndexOperation::Renew { extrinsic, hash } => { renewed_map.insert(extrinsic, DbHash::from_slice(hash.as_ref())); - } + }, } } for (index, extrinsic) in body.into_iter().enumerate() { @@ -1858,10 +1890,7 @@ fn apply_index_ops( let extrinsic_header = if let Some(hash) = renewed_map.get(&(index as u32)) { // Bump ref counter transaction.reference(columns::TRANSACTION, DbHash::from_slice(hash.as_ref())); - ExtrinsicHeader { - indexed_hash: hash.clone(), - data: extrinsic, - } + ExtrinsicHeader { indexed_hash: hash.clone(), data: extrinsic } } else { match index_map.get(&(index as u32)) { Some((hash, size)) if *size as usize <= extrinsic.len() => { @@ -1876,12 +1905,7 @@ fn apply_index_ops( data: extrinsic[..offset].to_vec(), } }, - _ => { - ExtrinsicHeader { - indexed_hash: Default::default(), - data: extrinsic, - } - } + _ => ExtrinsicHeader { indexed_hash: Default::default(), data: extrinsic }, } }; extrinsic_headers.push(extrinsic_header); @@ -1895,28 +1919,28 @@ fn apply_index_ops( extrinsic_headers.encode() } -fn apply_indexed_body( - transaction: &mut Transaction, - body: Vec>, -) { +fn apply_indexed_body(transaction: &mut Transaction, body: Vec>) { for extrinsic in body { let hash = sp_runtime::traits::BlakeTwo256::hash(&extrinsic); - transaction.store( - columns::TRANSACTION, - DbHash::from_slice(hash.as_ref()), - extrinsic, - ); + transaction.store(columns::TRANSACTION, DbHash::from_slice(hash.as_ref()), extrinsic); } } -impl sc_client_api::backend::AuxStore for Backend where Block: BlockT { +impl sc_client_api::backend::AuxStore for Backend +where + Block: BlockT, +{ fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); for (k, v) in insert { transaction.set(columns::AUX, k, v); @@ -1977,10 +2001,7 @@ impl 
sc_client_api::backend::Backend for Backend { Ok(()) } - fn commit_operation( - &self, - operation: Self::BlockImportOperation, - ) -> ClientResult<()> { + fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { let usage = operation.old_state.usage_info(); self.state_usage.merge_sm(usage); @@ -1992,7 +2013,7 @@ impl sc_client_api::backend::Backend for Backend { e @ Err(_) => { self.storage.state_db.revert_pending(); e - } + }, } } @@ -2037,23 +2058,22 @@ impl sc_client_api::backend::Backend for Backend { let last_finalized = self.blockchain.last_finalized()?; // We can do a quick check first, before doing a proper but more expensive check - if number > self.blockchain.info().finalized_number - || (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) + if number > self.blockchain.info().finalized_number || + (hash != last_finalized && !is_descendent_of(&hash, &last_finalized)?) { - return Err(ClientError::NotInFinalizedChain); + return Err(ClientError::NotInFinalizedChain) } - let justifications = - if let Some(mut stored_justifications) = self.blockchain.justifications(block)? { - if !stored_justifications.append(justification) { - return Err(ClientError::BadJustification( - "Duplicate consensus engine ID".into() - )); - } - stored_justifications - } else { - Justifications::from(justification) - }; + let justifications = if let Some(mut stored_justifications) = + self.blockchain.justifications(block)? + { + if !stored_justifications.append(justification) { + return Err(ClientError::BadJustification("Duplicate consensus engine ID".into())) + } + stored_justifications + } else { + Justifications::from(justification) + }; transaction.set_from_vec( columns::JUSTIFICATIONS, @@ -2075,25 +2095,20 @@ impl sc_client_api::backend::Backend for Backend { } fn usage_info(&self) -> Option { - let (io_stats, state_stats) = self.io_stats.take_or_else(|| + let (io_stats, state_stats) = self.io_stats.take_or_else(|| { ( // TODO: implement DB stats and cache size retrieval kvdb::IoStats::empty(), self.state_usage.take(), ) - ); + }); let database_cache = MemorySize::from_bytes(0); - let state_cache = MemorySize::from_bytes( - (*&self.shared_cache).read().used_storage_cache_size(), - ); + let state_cache = + MemorySize::from_bytes((*&self.shared_cache).read().used_storage_cache_size()); let state_db = self.storage.state_db.memory_info(); Some(UsageInfo { - memory: MemoryInfo { - state_cache, - database_cache, - state_db, - }, + memory: MemoryInfo { state_cache, database_cache, state_db }, io: IoInfo { transactions: io_stats.transactions, bytes_read: io_stats.bytes_read, @@ -2123,29 +2138,31 @@ impl sc_client_api::backend::Backend for Backend { let finalized = self.blockchain.info().finalized_number; let revertible = best_number - finalized; - let n = if !revert_finalized && revertible < n { - revertible - } else { - n - }; + let n = if !revert_finalized && revertible < n { revertible } else { n }; let mut revert_blocks = || -> ClientResult> { - for c in 0 .. n.saturated_into::() { + for c in 0..n.saturated_into::() { if best_number.is_zero() { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); let removed_number = best_number; - let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. 
Block hash not found.", best_number)))?; + let removed = + self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; let removed_hash = removed.hash(); let prev_number = best_number.saturating_sub(One::one()); - let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best_number)) - )?; + let prev_hash = self.blockchain.hash(prev_number)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; if !self.have_state_at(&prev_hash, prev_number) { return Ok(c.saturated_into::>()) @@ -2160,41 +2177,49 @@ impl sc_client_api::backend::Backend for Backend { let update_finalized = best_number < finalized; - let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; + let key = + utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; let changes_trie_cache_ops = self.changes_tries_storage.revert( &mut transaction, - &cache::ComplexBlockId::new( - removed.hash(), - removed_number, - ), + &cache::ComplexBlockId::new(removed.hash(), removed_number), )?; if update_finalized { transaction.set_from_vec( columns::META, meta_keys::FINALIZED_BLOCK, - key.clone() + key.clone(), ); reverted_finalized.insert(removed_hash); if let Some((hash, _)) = self.blockchain.info().finalized_state { if hash == best_hash { - if !best_number.is_zero() - && self.have_state_at(&prev_hash, best_number - One::one()) + if !best_number.is_zero() && + self.have_state_at(&prev_hash, best_number - One::one()) { let lookup_key = utils::number_and_hash_to_lookup_key( best_number - One::one(), - prev_hash + prev_hash, )?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_STATE, + lookup_key, + ); } else { - transaction.remove(columns::META, meta_keys::FINALIZED_STATE); + transaction + .remove(columns::META, meta_keys::FINALIZED_STATE); } } } } transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + best_hash, + ); self.storage.db.commit(transaction)?; self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(MetaUpdate { @@ -2202,10 +2227,10 @@ impl sc_client_api::backend::Backend for Backend { number: best_number, is_best: true, is_finalized: update_finalized, - with_state: false + with_state: false, }); - } - None => return Ok(c.saturated_into::>()) + }, + None => return Ok(c.saturated_into::>()), } } @@ -2230,36 +2255,27 @@ impl sc_client_api::backend::Backend for Backend { Ok((reverted, reverted_finalized)) } - fn remove_leaf_block( - &self, - hash: &Block::Hash, - ) -> ClientResult<()> { + fn remove_leaf_block(&self, hash: &Block::Hash) -> ClientResult<()> { let best_hash = self.blockchain.info().best_hash; if best_hash == *hash { - return Err( - sp_blockchain::Error::Backend( - format!("Can't remove best block {:?}", hash) - ) - ) + return Err(sp_blockchain::Error::Backend(format!("Can't remove best block {:?}", hash))) } let hdr = 
self.blockchain.header_metadata(hash.clone())?; if !self.have_state_at(&hash, hdr.number) { - return Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", hash) - ) - ) + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + hash + ))) } let mut leaves = self.blockchain.leaves.write(); if !leaves.contains(hdr.number, *hash) { - return Err( - sp_blockchain::Error::Backend( - format!("Can't remove non-leaf block {:?}", hash) - ) - ) + return Err(sp_blockchain::Error::Backend(format!( + "Can't remove non-leaf block {:?}", + hash + ))) } let mut transaction = Transaction::new(); @@ -2267,13 +2283,9 @@ impl sc_client_api::backend::Backend for Backend { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - let changes_trie_cache_ops = self.changes_tries_storage.revert( - &mut transaction, - &cache::ComplexBlockId::new( - *hash, - hdr.number, - ), - )?; + let changes_trie_cache_ops = self + .changes_tries_storage + .revert(&mut transaction, &cache::ComplexBlockId::new(*hash, hdr.number))?; self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); leaves.revert(hash.clone(), hdr.number); @@ -2300,11 +2312,7 @@ impl sc_client_api::backend::Backend for Backend { let root = genesis_state.root.clone(); let db_state = DbState::::new(genesis_state.clone(), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - None, - ); + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); let mut state = SyncingCachingState::new( caching_state, self.state_usage.clone(), @@ -2318,33 +2326,26 @@ impl sc_client_api::backend::Backend for Backend { let hash = match block { BlockId::Hash(h) => h, - BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| + BlockId::Number(n) => self.blockchain.hash(n)?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", n)) - )?, + })?, }; match self.blockchain.header_metadata(hash) { Ok(ref hdr) => { if !self.have_state_at(&hash, hdr.number) { - return Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root; let db_state = DbState::::new(self.storage.clone(), root); - let state = RefTrackingState::new( - db_state, - self.storage.clone(), - Some(hash.clone()), - ); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - Some(hash), - ); + let state = + RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone())); + let caching_state = + CachingState::new(state, self.shared_cache.clone(), Some(hash)); Ok(SyncingCachingState::new( caching_state, self.state_usage.clone(), @@ -2352,11 +2353,10 @@ impl sc_client_api::backend::Backend for Backend { self.import_lock.clone(), )) } else { - Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) + Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) } }, Err(e) => Err(e), @@ -2366,13 +2366,13 @@ impl sc_client_api::backend::Backend for Backend { fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { if self.is_archive { match 
self.blockchain.header_metadata(hash.clone()) { - Ok(header) => { - sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root, - (&[], None), - ).unwrap_or(None).is_some() - }, + Ok(header) => sp_state_machine::Storage::get( + self.storage.as_ref(), + &header.state_root, + (&[], None), + ) + .unwrap_or(None) + .is_some(), _ => false, } } else { @@ -2389,18 +2389,22 @@ impl sc_client_api::backend::LocalBackend for Backend::default(); { - let mut trie = TrieDBMut::::new( - &mut changes_trie_update, - &mut changes_root - ); + let mut trie = + TrieDBMut::::new(&mut changes_trie_update, &mut changes_root); for (key, value) in changes { trie.insert(&key, &value).unwrap(); } @@ -2471,7 +2473,8 @@ pub(crate) mod tests { if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); } - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) + .unwrap(); backend.commit_operation(op).unwrap(); header_hash @@ -2505,13 +2508,8 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); } @@ -2520,14 +2518,18 @@ pub(crate) mod tests { db.storage.db.clone() }; - let backend = Backend::::new(DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - state_pruning: PruningMode::keep_blocks(1), - source: DatabaseSettingsSrc::Custom(backing), - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - }, 0).unwrap(); + let backend = Backend::::new( + DatabaseSettings { + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + state_pruning: PruningMode::keep_blocks(1), + source: DatabaseSettingsSrc::Custom(backing), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }, + 0, + ) + .unwrap(); assert_eq!(backend.blockchain().info().best_number, 9); for i in 0..10 { assert!(backend.blockchain().hash(i).unwrap().is_some()) @@ -2547,28 +2549,22 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ - (vec![1, 3, 5], vec![2, 4, 6]), - (vec![1, 2, 3], vec![9, 9, 9]), - ]; + let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])]; - header.state_root = op.old_state.storage_root(storage - .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .0 + .into(); let hash = header.hash(); op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - }).unwrap(); - op.set_block_data( - header.clone(), - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + }) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); @@ -2592,26 +2588,17 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ - (vec![1, 3, 5], None), - (vec![5, 5, 5], Some(vec![4, 5, 6])), - ]; + let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ); + let (root, 
overlay) = op + .old_state + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); db.commit_operation(op).unwrap(); @@ -2631,7 +2618,9 @@ pub(crate) mod tests { let hash = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -2646,22 +2635,22 @@ pub(crate) mod tests { op.reset_storage(Storage { top: Default::default(), children_default: Default::default(), - }).unwrap(); + }) + .unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .unwrap(), + &b"hello"[..] + ); hash }; @@ -2678,28 +2667,27 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.insert(EMPTY_PREFIX, b"hello"); op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); + assert_eq!( + backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::(&key, EMPTY_PREFIX)) + .unwrap(), + &b"hello"[..] 
+ ); hash }; @@ -2716,28 +2704,24 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX) - ).is_some()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)) + .is_some()); hash }; @@ -2754,34 +2738,31 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX) - ).is_none()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)) + .is_none()); } backend.finalize_block(BlockId::Number(1), None).unwrap(); backend.finalize_block(BlockId::Number(2), None).unwrap(); backend.finalize_block(BlockId::Number(3), None).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX) - ).is_none()); + assert!(backend + .storage + .db + .get(columns::STATE, &sp_trie::prefixed_key::<BlakeTwo256>(&key, EMPTY_PREFIX)) + .is_none()); } #[test] @@ -2803,8 +2784,14 @@ pub(crate) mod tests { let tree_route = tree_route(blockchain, a3, b2).unwrap(); assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(), vec![b1, b2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(), + vec![b1, b2] + ); } { @@ -2812,14 +2799,20 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, a1); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(), vec![a2, a3]); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(), + vec![a2, a3] + ); } { let tree_route = tree_route(blockchain, a3, a1).unwrap(); assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(), vec![a3, a2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::<Vec<_>>(), + vec![a3, a2] + ); assert!(tree_route.enacted().is_empty()); } @@ -2845,7 +2838,10 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, block0); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::<Vec<_>>(), vec![block1]); + assert_eq!( + tree_route.enacted().iter().map(|r| 
r.hash).collect::>(), + vec![block1] + ); } } @@ -2943,20 +2939,25 @@ pub(crate) mod tests { #[test] fn test_leaves_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); } #[test] fn test_children_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); } #[test] fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( + backend, + ); } #[test] @@ -2974,7 +2975,10 @@ pub(crate) mod tests { let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![block2_a, block2_b, block2_c, block1_c] + ); backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); @@ -2985,7 +2989,8 @@ pub(crate) mod tests { #[test] fn test_aux() { - let backend: Backend = Backend::new_test(0, 0); + let backend: Backend = + Backend::new_test(0, 0); assert!(backend.get_aux(b"test").unwrap().is_none()); backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); @@ -2995,7 +3000,7 @@ pub(crate) mod tests { #[test] fn test_finalize_block_with_justification() { - use sc_client_api::blockchain::{Backend as BlockChainBackend}; + use sc_client_api::blockchain::Backend as BlockChainBackend; let backend = Backend::::new_test(10, 10); @@ -3013,7 +3018,7 @@ pub(crate) mod tests { #[test] fn test_append_justification_to_finalized_block() { - use sc_client_api::blockchain::{Backend as BlockChainBackend}; + use sc_client_api::blockchain::Backend as BlockChainBackend; let backend = Backend::::new_test(10, 10); @@ -3021,10 +3026,7 @@ pub(crate) mod tests { let _ = insert_header(&backend, 1, block0, None, Default::default()); let just0 = (CONS0_ENGINE_ID, vec![1, 2, 3]); - backend.finalize_block( - BlockId::Number(1), - Some(just0.clone().into()), - ).unwrap(); + backend.finalize_block(BlockId::Number(1), Some(just0.clone().into())).unwrap(); let just1 = (CONS1_ENGINE_ID, vec![4, 5]); backend.append_justification(BlockId::Number(1), just1.clone()).unwrap(); @@ -3077,7 +3079,9 @@ pub(crate) mod tests { let hash0 = { let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); let mut header = Header { number: 0, parent_hash: Default::default(), @@ -3088,30 +3092,28 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), b"test".to_vec())]; - header.state_root = op.old_state.storage_root(storage - .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))) - ).0.into(); + header.state_root = op + .old_state + 
.storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .0 + .into(); let hash = header.hash(); op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - }).unwrap(); - op.set_block_data( - header.clone(), - Some(vec![]), - None, - None, - NewBlockState::Best, - ).unwrap(); + }) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) + .unwrap(); backend.commit_operation(op).unwrap(); hash }; - let block0_hash = backend.state_at(BlockId::Hash(hash0)) + let block0_hash = backend + .state_at(BlockId::Hash(hash0)) .unwrap() .storage_hash(&b"test"[..]) .unwrap(); @@ -3129,22 +3131,16 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ); + let (root, overlay) = op + .old_state + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); let hash = header.hash(); op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data( - header, - Some(vec![]), - None, - None, - NewBlockState::Normal, - ).unwrap(); + op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Normal) + .unwrap(); backend.commit_operation(op).unwrap(); @@ -3159,7 +3155,8 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); } - let block1_hash = backend.state_at(BlockId::Hash(hash1)) + let block1_hash = backend + .state_at(BlockId::Hash(hash1)) .unwrap() .storage_hash(&b"test"[..]) .unwrap(); @@ -3189,7 +3186,8 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_header(&backend, 0, Default::default(), None, Default::default()); + let mut prev_hash = + insert_header(&backend, 0, Default::default(), None, Default::default()); let cht_size: u64 = cht::size(); for i in 1..1 + cht_size + cht_size + 1 { prev_hash = insert_header(&backend, i, prev_hash, None, Default::default()); @@ -3197,12 +3195,18 @@ pub(crate) mod tests { let blockchain = backend.blockchain(); - let cht_root_1 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0)) - .unwrap().unwrap(); - let cht_root_2 = blockchain.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap().unwrap(); - let cht_root_3 = blockchain.header_cht_root(cht_size, cht::end_number(cht_size, 0)) - .unwrap().unwrap(); + let cht_root_1 = blockchain + .header_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = blockchain + .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = blockchain + .header_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } @@ -3213,8 +3217,16 @@ pub(crate) mod tests { let backend = Backend::::new_test_with_tx_storage(2, 0, *storage); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - for i in 0 .. 
5 { - let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()], None); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); blocks.push(hash); prev_hash = hash; } @@ -3222,7 +3234,7 @@ pub(crate) mod tests { { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); - for i in 1 .. 5 { + for i in 1..5 { op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); } backend.commit_operation(op).unwrap(); @@ -3238,15 +3250,20 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage( - 2, - 10, - TransactionStorageMode::StorageChain - ); + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - for i in 0 .. 5 { - let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()], None); + for i in 0..5 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); blocks.push(hash); prev_hash = hash; } @@ -3259,15 +3276,23 @@ pub(crate) mod tests { None, sp_core::H256::random(), vec![2.into()], - None + None, + ); + insert_block( + &backend, + 3, + fork_hash_root, + None, + H256::random(), + vec![3.into(), 11.into()], + None, ); - insert_block(&backend, 3, fork_hash_root, None, H256::random(), vec![3.into(), 11.into()], None); let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); op.mark_head(BlockId::Hash(blocks[4])).unwrap(); backend.commit_operation(op).unwrap(); - for i in 1 .. 5 { + for i in 1..5 { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); @@ -3284,16 +3309,13 @@ pub(crate) mod tests { #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage( - 2, - 10, - TransactionStorageMode::StorageChain - ); + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); - let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); - for i in 0 .. 10 { + let x1_hash = as sp_core::Hasher>::hash(&x1[1..]); + for i in 0..10 { let mut index = Vec::new(); if i == 0 { index.push(IndexOperation::Insert { @@ -3303,10 +3325,7 @@ pub(crate) mod tests { }); } else if i < 5 { // keep renewing 1st - index.push(IndexOperation::Renew { - extrinsic: 0, - hash: x1_hash.as_ref().to_vec(), - }); + index.push(IndexOperation::Renew { extrinsic: 0, hash: x1_hash.as_ref().to_vec() }); } // else stop renewing let hash = insert_block( &backend, @@ -3315,13 +3334,13 @@ pub(crate) mod tests { None, Default::default(), vec![i.into()], - Some(index) + Some(index), ); blocks.push(hash); prev_hash = hash; } - for i in 1 .. 
10 { + for i in 1..10 { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(blocks[4])).unwrap(); op.mark_finalized(BlockId::Hash(blocks[i]), None).unwrap(); @@ -3337,15 +3356,20 @@ pub(crate) mod tests { #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage( - 2, - 10, - TransactionStorageMode::StorageChain - ); + let backend = + Backend::::new_test_with_tx_storage(2, 10, TransactionStorageMode::StorageChain); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); - for i in 0 .. 2 { - let hash = insert_block(&backend, i, prev_hash, None, Default::default(), vec![i.into()], None); + for i in 0..2 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ); blocks.push(hash); prev_hash = hash; } @@ -3358,7 +3382,7 @@ pub(crate) mod tests { None, sp_core::H256::random(), vec![42.into()], - None + None, ); assert!(backend.remove_leaf_block(&best_hash).is_err()); assert!(backend.have_state_at(&prev_hash, 1)); diff --git a/substrate/client/db/src/light.rs b/substrate/client/db/src/light.rs index 4e61a9c2ee03de4221df77f21ba90d15d0246d5d..ded5e598fc683c591cc7fa9af8c0a5f366e51e4d 100644 --- a/substrate/client/db/src/light.rs +++ b/substrate/client/db/src/light.rs @@ -18,31 +18,31 @@ //! RocksDB-based light client blockchain storage. -use std::{sync::Arc, collections::HashMap}; -use std::convert::TryInto; use parking_lot::RwLock; +use std::{collections::HashMap, convert::TryInto, sync::Arc}; +use crate::{ + cache::{ComplexBlockId, DbCache, DbCacheSync, EntryType as CacheEntryType}, + utils::{self, block_id_to_lookup_key, meta_keys, read_db, read_meta, DatabaseType, Meta}, + DatabaseSettings, DbHash, FrozenForDuration, +}; +use codec::{Decode, Encode}; +use log::{debug, trace, warn}; use sc_client_api::{ - cht, backend::{AuxStore, NewBlockState, ProvideChtRoots}, UsageInfo, - blockchain::{ - BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo, - }, - Storage, + backend::{AuxStore, NewBlockState, ProvideChtRoots}, + blockchain::{BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo}, + cht, Storage, UsageInfo, }; use sp_blockchain::{ - CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, - Error as ClientError, Result as ClientResult, - HeaderBackend as BlockchainHeaderBackend, - well_known_cache_keys, + well_known_cache_keys, CachedHeaderMetadata, Error as ClientError, + HeaderBackend as BlockchainHeaderBackend, HeaderMetadata, HeaderMetadataCache, + Result as ClientResult, }; use sp_database::{Database, Transaction}; -use codec::{Decode, Encode}; -use sp_runtime::generic::{DigestItem, BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; -use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, DatabaseType, Meta, read_db, block_id_to_lookup_key, read_meta}; -use crate::{DatabaseSettings, FrozenForDuration, DbHash}; -use log::{trace, warn, debug}; +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, Zero}, +}; pub(crate) mod columns { pub const META: u32 = crate::utils::COLUMN_META; @@ -139,8 +139,8 @@ impl LightStorage { } impl BlockchainHeaderBackend for LightStorage - where - Block: BlockT, +where + Block: BlockT, { fn header(&self, id: BlockId) -> ClientResult> { utils::read_header(&*self.db, columns::KEY_LOOKUP, 
columns::HEADER, id) @@ -165,12 +165,8 @@ impl BlockchainHeaderBackend for LightStorage fn status(&self, id: BlockId) -> ClientResult { let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), + BlockId::Hash(_) => + read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some(), BlockId::Number(n) => n <= self.meta.read().best_number, }; match exists { @@ -180,7 +176,9 @@ impl BlockchainHeaderBackend for LightStorage } fn number(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(lookup_key) = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? { + if let Some(lookup_key) = + block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? + { let number = utils::lookup_key_to_number(&lookup_key)?; Ok(Some(number)) } else { @@ -196,17 +194,25 @@ impl BlockchainHeaderBackend for LightStorage impl HeaderMetadata for LightStorage { type Error = ClientError; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else(|| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or_else(|| ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }, Ok) + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache.header_metadata(hash).map_or_else( + || { + self.header(BlockId::hash(hash))? + .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or_else(|| { + ClientError::UnknownBlock(format!("header not found in db: {}", hash)) + }) + }, + Ok, + ) } fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { @@ -221,10 +227,9 @@ impl HeaderMetadata for LightStorage { impl LightStorage { // Get block changes trie root, if available. fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block) - .map(|header| header.and_then(|header| - header.digest().log(DigestItem::as_changes_trie_root) - .cloned())) + self.header(block).map(|header| { + header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) } /// Handle setting head within a transaction. `route_to` should be the last @@ -251,14 +256,16 @@ impl LightStorage { for retracted in tree_route.retracted() { if retracted.hash == meta.finalized_hash { // TODO: can we recover here? 
- warn!("Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash)); + warn!( + "Safety failure: reverting finalized block {:?}", + (&retracted.number, &retracted.hash) + ); } utils::remove_number_to_key_mapping( transaction, columns::KEY_LOOKUP, - retracted.number + retracted.number, )?; } @@ -267,7 +274,7 @@ impl LightStorage { transaction, columns::KEY_LOOKUP, enacted.number, - enacted.hash + enacted.hash, )?; } } @@ -292,10 +299,11 @@ impl LightStorage { ) -> ClientResult<()> { let meta = self.meta.read(); if &meta.finalized_hash != header.parent_hash() { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash), - ).into()) + return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + meta.finalized_hash, hash + )) + .into()) } let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; @@ -313,12 +321,14 @@ impl LightStorage { }); let new_header_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range.map(|num| self.hash(num)) + cht::size(), + new_cht_number, + cht_range.map(|num| self.hash(num)), )?; transaction.set( columns::CHT, &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, - new_header_cht_root.as_ref() + new_header_cht_root.as_ref(), ); // if the header includes changes trie root, let's build a changes tries roots CHT @@ -329,14 +339,16 @@ impl LightStorage { current_num = current_num + One::one(); Some(old_current_num) }); - let new_changes_trie_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range - .map(|num| self.changes_trie_root(BlockId::Number(num))) - )?; + let new_changes_trie_cht_root = + cht::compute_root::, _>( + cht::size(), + new_cht_number, + cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), + )?; transaction.set( columns::CHT, &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, - new_changes_trie_cht_root.as_ref() + new_changes_trie_cht_root.as_ref(), ); } @@ -354,7 +366,7 @@ impl LightStorage { transaction, columns::KEY_LOOKUP, prune_block, - hash + hash, )?; transaction.remove(columns::HEADER, &lookup_key); } @@ -370,7 +382,7 @@ impl LightStorage { &self, cht_type: u8, cht_size: NumberFor, - block: NumberFor + block: NumberFor, ) -> ClientResult> { let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); @@ -383,7 +395,8 @@ impl LightStorage { } let cht_start = cht::start_number(cht_size, cht_number); - self.db.get(columns::CHT, &cht_key(cht_type, cht_start)?) + self.db + .get(columns::CHT, &cht_key(cht_type, cht_start)?) 
.ok_or_else(no_cht_for_block) .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) .map(Some) @@ -391,15 +404,20 @@ impl LightStorage { } impl AuxStore for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { let mut transaction = Transaction::new(); for (k, v) in insert { transaction.set(columns::AUX, k, v); @@ -418,7 +436,8 @@ impl AuxStore for LightStorage } impl Storage for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn import_header( &self, @@ -447,19 +466,12 @@ impl Storage for LightStorage self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; } - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header.hash().clone(), - header_metadata, - ); + self.header_metadata_cache + .insert_header_metadata(header.hash().clone(), header_metadata); let is_genesis = number.is_zero(); if is_genesis { @@ -474,25 +486,28 @@ impl Storage for LightStorage }; if finalized { - self.note_finalized( - &mut transaction, - &header, - hash, - )?; + self.note_finalized(&mut transaction, &header, hash)?; } // update changes trie configuration cache if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { - if let Some(new_configuration) = crate::changes_tries_storage::extract_new_configuration(&header) { - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + if let Some(new_configuration) = + crate::changes_tries_storage::extract_new_configuration(&header) + { + cache_at + .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); } } { let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) + let cache_ops = cache + .transaction(&mut transaction) .on_block_insert( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), ComplexBlockId::new(hash, number), cache_at, if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, @@ -502,9 +517,10 @@ impl Storage for LightStorage debug!("Light DB Commit {:?} ({})", hash, number); self.db.commit(transaction)?; - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed", + ); } self.update_meta(hash, number, leaf_state.is_best(), finalized); @@ -518,7 +534,11 @@ impl Storage for LightStorage let number = header.number(); let mut transaction = Transaction::new(); - self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; + self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; self.db.commit(transaction)?; 
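Both the `import_header` path above and the pruning path depend on the `KEY_LOOKUP` mappings written by `utils::insert_hash_to_key_mapping` / `number_and_hash_to_lookup_key`. As a rough standalone sketch of that scheme — assuming a 4-byte big-endian number prefix, and not the actual `utils` implementation — the point is that prefixing the hash with the block number makes keys sort by height, so range scans and pruning stay contiguous:

```rust
/// Hypothetical stand-in for the lookup-key helper (not the real `utils` code).
fn number_and_hash_to_lookup_key(number: u32, hash: &[u8; 32]) -> Vec<u8> {
    let mut key = Vec::with_capacity(4 + 32);
    // Big-endian bytes: lexicographic order == numeric order.
    key.extend_from_slice(&number.to_be_bytes());
    key.extend_from_slice(hash);
    key
}

fn main() {
    let k1 = number_and_hash_to_lookup_key(1, &[0xff; 32]);
    let k2 = number_and_hash_to_lookup_key(2, &[0x00; 32]);
    // A later block always sorts after an earlier one, regardless of hash.
    assert!(k1 < k2);
}
```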
self.update_meta(hash, header.number().clone(), true, false); @@ -536,17 +556,22 @@ impl Storage for LightStorage self.note_finalized(&mut transaction, &header, hash.clone())?; { let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) + let cache_ops = cache + .transaction(&mut transaction) .on_block_finalize( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), - ComplexBlockId::new(hash, number) + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), + ComplexBlockId::new(hash, number), )? .into_ops(); self.db.commit(transaction)?; - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed", + ); } self.update_meta(hash, header.number().clone(), false, true); @@ -566,7 +591,7 @@ impl Storage for LightStorage #[cfg(not(target_os = "unknown"))] fn usage_info(&self) -> Option { - use sc_client_api::{MemoryInfo, IoInfo, MemorySize}; + use sc_client_api::{IoInfo, MemoryInfo, MemorySize}; // TODO: reimplement IO stats let database_cache = MemorySize::from_bytes(0); @@ -591,7 +616,7 @@ impl Storage for LightStorage state_reads_cache: 0, state_writes_cache: 0, state_writes_nodes: 0, - } + }, }) } @@ -602,7 +627,8 @@ impl Storage for LightStorage } impl ProvideChtRoots for LightStorage - where Block: BlockT, +where + Block: BlockT, { fn header_cht_root( &self, @@ -630,12 +656,14 @@ fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { #[cfg(test)] pub(crate) mod tests { + use super::*; use sc_client_api::cht; - use sp_core::ChangesTrieConfiguration; - use sp_runtime::generic::{DigestItem, ChangesTrieSignal}; - use sp_runtime::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; use sp_blockchain::{lowest_common_ancestor, tree_route}; - use super::*; + use sp_core::ChangesTrieConfiguration; + use sp_runtime::{ + generic::{ChangesTrieSignal, DigestItem}, + testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash}, + }; type Block = RawBlock>; type AuthorityId = sp_core::ed25519::Public; @@ -652,7 +680,10 @@ pub(crate) mod tests { fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { let mut header = default_header(parent, number); - header.digest.logs.push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); + header + .digest + .logs + .push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); header } @@ -698,7 +729,8 @@ pub(crate) mod tests { #[test] fn returns_known_header() { let db = LightStorage::new_test(); - let known_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let known_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); assert_eq!(header_by_hash, header_by_number); @@ -714,7 +746,8 @@ pub(crate) mod tests { #[test] fn returns_info() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); let info = db.info(); 
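The `cht_key(HEADER_CHT_PREFIX, ...)` and `cht_key(CHANGES_TRIE_CHT_PREFIX, ...)` writes above share a single `CHT` column. A plausible sketch of that 5-byte key layout — the prefix values are illustrative assumptions, and the real helper is fallible over a generic block number rather than taking a plain `u32`:

```rust
// Assumed tag bytes; the real constants live in light.rs.
const HEADER_CHT_PREFIX: u8 = 0;
const CHANGES_TRIE_CHT_PREFIX: u8 = 1;

/// One prefix byte tagging the CHT kind, then the window's start block
/// as big-endian u32.
fn cht_key(cht_type: u8, start_block: u32) -> [u8; 5] {
    let mut key = [0u8; 5];
    key[0] = cht_type;
    key[1..].copy_from_slice(&start_block.to_be_bytes());
    key
}

fn main() {
    // Both CHT kinds for the same window share the numeric suffix and
    // differ only in the prefix byte, so they never collide in the column.
    assert_eq!(&cht_key(HEADER_CHT_PREFIX, 1)[1..], &cht_key(CHANGES_TRIE_CHT_PREFIX, 1)[1..]);
    assert_ne!(cht_key(HEADER_CHT_PREFIX, 1), cht_key(CHANGES_TRIE_CHT_PREFIX, 1));
}
```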
assert_eq!(info.best_hash, genesis_hash); assert_eq!(info.best_number, 0); @@ -729,17 +762,22 @@ pub(crate) mod tests { #[test] fn returns_block_status() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), BlockStatus::Unknown); + assert_eq!( + db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), + BlockStatus::Unknown + ); assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); } #[test] fn returns_block_hash() { let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); assert_eq!(db.hash(1).unwrap(), None); } @@ -749,7 +787,8 @@ pub(crate) mod tests { let raw_db = Arc::new(sp_database::MemDb::default()); let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let genesis_hash = + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(raw_db.count(columns::HEADER), 1); assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); @@ -760,43 +799,41 @@ pub(crate) mod tests { #[test] fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>(header_producer: F) -> - (Arc, LightStorage) - { + fn insert_headers Header>( + header_producer: F, + ) -> (Arc, LightStorage) { let raw_db = Arc::new(sp_database::MemDb::default()); let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); let cht_size: u64 = cht::size(); let ucht_size: usize = cht_size as _; // insert genesis block header (never pruned) - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); + let mut prev_hash = + insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); // insert SIZE blocks && ensure that nothing is pruned for number in 0..cht::size() { - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); + prev_hash = + insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); } assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); // insert next SIZE blocks && ensure that nothing is pruned for number in 0..(cht_size as _) { - prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + number), - ); + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + number) + }); } assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned // nothing is yet finalized, so nothing is pruned. 
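The CHT tests around here lean on `cht::size`, `cht::start_number` and friends. A sketch of the window arithmetic they imply — `SIZE` is a stand-in constant playing the role of `cht::size()`, not its real value:

```rust
// Illustrative window size; only the arithmetic matters here.
const SIZE: u64 = 2048;

/// CHT window a block belongs to. Genesis is never covered by a CHT.
fn cht_number(block: u64) -> Option<u64> {
    if block == 0 { None } else { Some((block - 1) / SIZE) }
}

/// First block of a window: windows cover 1..=SIZE, SIZE+1..=2*SIZE, ...
fn start_number(cht_num: u64) -> u64 {
    cht_num * SIZE + 1
}

fn main() {
    // Every block in a window maps to the same CHT number, which is why the
    // tests expect identical roots for start, middle and end of window 0.
    assert_eq!(cht_number(1), Some(0));
    assert_eq!(cht_number(SIZE), Some(0));
    assert_eq!(cht_number(SIZE + 1), Some(1));
    assert_eq!(start_number(1), SIZE + 1);
}
```

This also explains the `1 + SIZE + SIZE + 1` insertion pattern: only once finality moves past the second full window does the first window's header set become replaceable by its CHT root.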
- prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + cht_size), - ); + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + cht_size) + }); assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); assert_eq!(raw_db.count(columns::CHT), 0); @@ -839,7 +876,10 @@ pub(crate) mod tests { #[test] fn get_cht_fails_for_non_existent_cht() { let cht_size: u64 = cht::size(); - assert!(LightStorage::::new_test().header_cht_root(cht_size, cht_size / 2).unwrap().is_none()); + assert!(LightStorage::::new_test() + .header_cht_root(cht_size, cht_size / 2) + .unwrap() + .is_none()); } #[test] @@ -847,26 +887,41 @@ pub(crate) mod tests { let db = LightStorage::new_test(); // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_with_changes_trie(&Default::default(), 0)); + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_with_changes_trie(&Default::default(), 0) + }); let cht_size: u64 = cht::size(); let ucht_size: usize = cht_size as _; for i in 1..1 + ucht_size + ucht_size + 1 { - prev_hash = insert_block(&db, HashMap::new(), || header_with_changes_trie(&prev_hash, i as u64)); + prev_hash = insert_block(&db, HashMap::new(), || { + header_with_changes_trie(&prev_hash, i as u64) + }); db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); } - let cht_root_1 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2).unwrap().unwrap(); - let cht_root_3 = db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_1 = + db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_2 = db + .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = + db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); - let cht_root_1 = db.changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.changes_trie_cht_root( - cht_size, - cht::start_number(cht_size, 0) + cht_size / 2, - ).unwrap().unwrap(); - let cht_root_3 = db.changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); + let cht_root_1 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = db + .changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); assert_eq!(cht_root_1, cht_root_2); assert_eq!(cht_root_2, cht_root_3); } @@ -882,15 +937,23 @@ pub(crate) mod tests { let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); // fork from genesis: 2 prong. 
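The `tree_route` assertions in these tests split a reorg into the retracted (old branch) and enacted (new branch) sides. A minimal sketch of that computation over toy integer "hashes" — not sp_blockchain's actual implementation, which walks header metadata:

```rust
use std::collections::HashMap;

/// Walk both branches back to the common ancestor; the `from` side is
/// "retracted", the `to` side (reversed into ancestor->tip order) "enacted".
fn tree_route(
    parents: &HashMap<u32, u32>, // child -> parent links, standing in for the header backend
    mut from: u32,
    mut to: u32,
    depth: impl Fn(u32) -> u32,
) -> (Vec<u32>, Vec<u32>) {
    let (mut retracted, mut enacted) = (Vec::new(), Vec::new());
    while depth(from) > depth(to) { retracted.push(from); from = parents[&from]; }
    while depth(to) > depth(from) { enacted.push(to); to = parents[&to]; }
    while from != to {
        retracted.push(from); from = parents[&from];
        enacted.push(to);     to = parents[&to];
    }
    enacted.reverse();
    (retracted, enacted)
}

fn main() {
    // 0 -> a1(1) -> a2(2) -> a3(3)  and a fork  0 -> b1(4) -> b2(5)
    let parents: HashMap<u32, u32> = [(1, 0), (2, 1), (3, 2), (4, 0), (5, 4)].into();
    let depth = |h: u32| match h { 0 => 0, 1 | 4 => 1, 2 | 5 => 2, 3 => 3, _ => unreachable!() };
    let (retracted, enacted) = tree_route(&parents, 3, 5, depth);
    assert_eq!(retracted, vec![3, 2, 1]); // a3, a2, a1 — tip-first, as asserted in the tests
    assert_eq!(enacted, vec![4, 5]);      // b1, b2 — ancestor-first
}
```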
- let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); { let tree_route = tree_route(&db, a3, b2).unwrap(); assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![b1, b2] + ); } { @@ -898,14 +961,20 @@ pub(crate) mod tests { assert_eq!(tree_route.common_block().hash, a1); assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); + assert_eq!( + tree_route.enacted().iter().map(|r| r.hash).collect::>(), + vec![a2, a3] + ); } { let tree_route = tree_route(&db, a3, a1).unwrap(); assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); + assert_eq!( + tree_route.retracted().iter().map(|r| r.hash).collect::>(), + vec![a3, a2] + ); assert!(tree_route.enacted().is_empty()); } @@ -929,7 +998,9 @@ pub(crate) mod tests { let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); // fork from genesis: 2 prong. - let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); { @@ -979,7 +1050,11 @@ pub(crate) mod tests { fn authorities_are_cached() { let db = LightStorage::new_test(); - fn run_checks(db: &LightStorage, max: u64, checks: &[(u64, Option>)]) { + fn run_checks( + db: &LightStorage, + max: u64, + checks: &[(u64, Option>)], + ) { for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { let actual = authorities(db.cache(), BlockId::Number(*at)); assert_eq!(*expected, actual); @@ -990,14 +1065,21 @@ pub(crate) mod tests { HashMap::new() } - fn make_authorities(authorities: Vec) -> HashMap> { + fn make_authorities( + authorities: Vec, + ) -> HashMap> { let mut map = HashMap::new(); map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); map } - fn authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) + fn authorities( + cache: &dyn BlockchainCache, + at: BlockId, + ) -> Option> { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, &at) + .unwrap_or(None) .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) } @@ -1021,17 +1103,27 @@ pub(crate) mod tests { (6, Some(vec![auth1(), auth2()])), ]; - let hash0 = insert_final_block(&db, same_authorities(), || default_header(&Default::default(), 0)); + let hash0 = insert_final_block(&db, same_authorities(), || { + default_header(&Default::default(), 0) + }); run_checks(&db, 0, &checks); let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash1, 2)); + let hash2 = insert_final_block(&db, 
make_authorities(vec![auth1()]), || { + default_header(&hash1, 2) + }); run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); + let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash3, 4)); + let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash3, 4) + }); run_checks(&db, 4, &checks); - let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash4, 5)); + let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash4, 5) + }); run_checks(&db, 5, &checks); let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); run_checks(&db, 6, &checks); @@ -1043,9 +1135,14 @@ pub(crate) mod tests { // some older non-best blocks are inserted // ... -> B2(1) -> B2_1(1) -> B2_2(2) // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); + let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash2_1, 4)); + let hash2_2 = + insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash2_1, 4) + }); assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); } @@ -1056,51 +1153,41 @@ pub(crate) mod tests { // \> B6_1_1(5) // \> B6_1_2(6) -> B6_1_3(7) - let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash7 = + insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash8 = + insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1 = + insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, 
make_authorities(vec![auth5()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + let hash6_2 = + insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1114,10 +1201,7 @@ pub(crate) mod tests { { // finalize block hash6_1 db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1126,10 +1210,7 @@ pub(crate) mod tests { assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); // finalize block hash6_2 db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]),); assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); @@ -1142,7 +1223,8 @@ pub(crate) mod tests { #[test] fn database_is_reopened() { let db = LightStorage::new_test(); - let hash0 = insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let hash0 = + 
insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!(db.info().best_hash, hash0); assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); @@ -1157,7 +1239,8 @@ pub(crate) mod tests { let db = LightStorage::::new_test(); // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()).unwrap(); + db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()) + .unwrap(); // check aux values assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); @@ -1165,10 +1248,13 @@ pub(crate) mod tests { assert_eq!(db.get_aux(&[3]).unwrap(), None); // delete aux1 + insert aux3 using import operation - db.import_header(default_header(&Default::default(), 0), HashMap::new(), NewBlockState::Best, vec![ - (vec![3], Some(vec![103])), - (vec![1], None), - ]).unwrap(); + db.import_header( + default_header(&Default::default(), 0), + HashMap::new(), + NewBlockState::Best, + vec![(vec![3], Some(vec![103])), (vec![1], None)], + ) + .unwrap(); // check aux values assert_eq!(db.get_aux(&[1]).unwrap(), None); @@ -1208,7 +1294,8 @@ pub(crate) mod tests { }; // restart && check that after restart value is read from the cache - let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); + let db = + LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); assert_eq!( db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, genesis_hash.unwrap()), None, vec![42])), @@ -1224,7 +1311,9 @@ pub(crate) mod tests { // insert block#0 && block#1 (no value for cache is provided) let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)).unwrap() + db.cache() + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)) + .unwrap() .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), None, ); @@ -1232,13 +1321,15 @@ pub(crate) mod tests { // insert configuration at block#1 (starts from block#2) insert_block(&db, HashMap::new(), || { let mut header = default_header(&hash0, 1); - header.digest_mut().push( - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_config.clone())) - ); + header.digest_mut().push(DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(new_config.clone()), + )); header }); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)).unwrap() + db.cache() + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)) + .unwrap() .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), Some(new_config), ); diff --git a/substrate/client/db/src/offchain.rs b/substrate/client/db/src/offchain.rs index df45c4946e6226d0dcb5359602fb6dcba51e0ff6..c31273ff07c63b95bc48db467a7ebf4f9061a375 100644 --- a/substrate/client/db/src/offchain.rs +++ b/substrate/client/db/src/offchain.rs @@ -21,8 +21,8 @@ use std::{collections::HashMap, sync::Arc}; use crate::{columns, Database, DbHash, Transaction}; -use parking_lot::Mutex; use log::error; +use parking_lot::Mutex; /// Offchain local storage #[derive(Clone)] @@ -33,8 +33,7 @@ pub struct LocalStorage { impl std::fmt::Debug for LocalStorage { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("LocalStorage") - .finish() + fmt.debug_struct("LocalStorage").finish() } } @@ -49,10 +48,7 @@ 
impl LocalStorage { /// Create offchain local storage with given `KeyValueDB` backend. pub fn new(db: Arc>) -> Self { - Self { - db, - locks: Default::default(), - } + Self { db, locks: Default::default() } } } @@ -118,11 +114,7 @@ impl sp_core::offchain::OffchainStorage for LocalStorage { /// Concatenate the prefix and key to create an offchain key in the db. pub(crate) fn concatenate_prefix_and_key(prefix: &[u8], key: &[u8]) -> Vec { - prefix - .iter() - .chain(key.into_iter()) - .cloned() - .collect() + prefix.iter().chain(key.into_iter()).cloned().collect() } #[cfg(test)] @@ -155,5 +147,4 @@ mod tests { assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); } - } diff --git a/substrate/client/db/src/parity_db.rs b/substrate/client/db/src/parity_db.rs index ed39c1e9f669f365b0b1abdb674c12ee8a13a2a4..07f58baf015419f85e97fcfe5162c706fc539203 100644 --- a/substrate/client/db/src/parity_db.rs +++ b/substrate/client/db/src/parity_db.rs @@ -15,27 +15,29 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::{ + columns, + utils::{DatabaseType, NUM_COLUMNS}, +}; /// A `Database` adapter for parity-db. - -use sp_database::{Database, Change, ColumnId, Transaction, error::DatabaseError}; -use crate::utils::{DatabaseType, NUM_COLUMNS}; -use crate::columns; +use sp_database::{error::DatabaseError, Change, ColumnId, Database, Transaction}; struct DbAdapter(parity_db::Db); fn handle_err(result: parity_db::Result) -> T { match result { Ok(r) => r, - Err(e) => { + Err(e) => { panic!("Critical database error: {:?}", e); - } + }, } } /// Wrap parity-db database into a trait object that implements `sp_database::Database` -pub fn open>(path: &std::path::Path, db_type: DatabaseType) - -> parity_db::Result>> -{ +pub fn open>( + path: &std::path::Path, + db_type: DatabaseType, +) -> parity_db::Result>> { let mut config = parity_db::Options::with_columns(path, NUM_COLUMNS as u8); config.sync = true; // Flush each commit if db_type == DatabaseType::Full { @@ -50,13 +52,11 @@ pub fn open>(path: &std::path::Path, db_type: DatabaseTyp impl> Database for DbAdapter { fn commit(&self, transaction: Transaction) -> Result<(), DatabaseError> { - handle_err(self.0.commit(transaction.0.into_iter().map(|change| - match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), - _ => unimplemented!(), - })) - ); + handle_err(self.0.commit(transaction.0.into_iter().map(|change| match change { + Change::Set(col, key, value) => (col as u8, key, Some(value)), + Change::Remove(col, key) => (col as u8, key, None), + _ => unimplemented!(), + }))); Ok(()) } diff --git a/substrate/client/db/src/stats.rs b/substrate/client/db/src/stats.rs index 3fd93db931d029fcb2a8da73fa75f674f6a5b438..9223142ef5abaaef2fcc7dd03eeebcda57f4d25e 100644 --- a/substrate/client/db/src/stats.rs +++ b/substrate/client/db/src/stats.rs @@ -65,7 +65,10 @@ impl StateUsageStats { /// Tally one key read. pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec>, cache: bool) { - self.tally_read(key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), cache); + self.tally_read( + key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), + cache, + ); } /// Tally one child key read. 
@@ -103,9 +106,11 @@ impl StateUsageStats { self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed); self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes.fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); + self.bytes_written_nodes + .fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes.fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); + self.bytes_removed_nodes + .fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); } diff --git a/substrate/client/db/src/storage_cache.rs b/substrate/client/db/src/storage_cache.rs index e4b59514654617a73ff6b0fe4700849cef8d0723..d5aa43e8bac931aeeeb6e9235c3043f50e91fe05 100644 --- a/substrate/client/db/src/storage_cache.rs +++ b/substrate/client/db/src/storage_cache.rs @@ -20,21 +20,22 @@ //! Tracks changes over the span of a few recent blocks and handles forks //! by tracking/removing cache entries for conflicting changes. -use std::collections::{VecDeque, HashSet, HashMap}; -use std::sync::Arc; -use std::hash::Hash as StdHash; -use parking_lot::{RwLock, RwLockUpgradableReadGuard}; -use linked_hash_map::{LinkedHashMap, Entry}; +use crate::{stats::StateUsageStats, utils::Meta}; use hash_db::Hasher; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; -use sp_core::hexdisplay::HexDisplay; -use sp_core::storage::ChildInfo; +use linked_hash_map::{Entry, LinkedHashMap}; +use log::trace; +use parking_lot::{RwLock, RwLockUpgradableReadGuard}; +use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; use sp_state_machine::{ - backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, + backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, + StorageValue, TrieBackend, +}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + hash::Hash as StdHash, + sync::Arc, }; -use log::trace; -use crate::{utils::Meta, stats::StateUsageStats}; const STATE_CACHE_BLOCKS: usize = 12; @@ -75,7 +76,7 @@ impl EstimateSize for Vec { impl EstimateSize for Option> { fn estimate_size(&self) -> usize { - self.as_ref().map(|v|v.capacity()).unwrap_or(0) + self.as_ref().map(|v| v.capacity()).unwrap_or(0) } } @@ -84,7 +85,7 @@ struct OptionHOut>(Option); impl> EstimateSize for OptionHOut { fn estimate_size(&self) -> usize { // capacity would be better - self.0.as_ref().map(|v|v.as_ref().len()).unwrap_or(0) + self.0.as_ref().map(|v| v.as_ref().len()).unwrap_or(0) } } @@ -125,20 +126,22 @@ impl LRUMap { }; while *storage_used_size > limit { - if let Some((k,v)) = lmap.pop_front() { + if let Some((k, v)) = lmap.pop_front() { *storage_used_size -= k.estimate_size(); *storage_used_size -= v.estimate_size(); } else { // can happen fairly often as we get value from multiple lru // and only remove from a single lru - break; + break } } } - fn get(&mut self, k: &Q) -> Option<&mut V> - where K: std::borrow::Borrow, - Q: StdHash + Eq { + fn get(&mut self, k: &Q) -> Option<&mut V> + where + K: std::borrow::Borrow, + Q: StdHash + Eq, + { self.0.get_refresh(k) } @@ -149,15 +152,13 @@ impl LRUMap { 
self.0.clear(); self.1 = 0; } - } impl Cache { /// Returns the used memory size of the storage cache in bytes. pub fn used_storage_cache_size(&self) -> usize { - self.lru_storage.used_size() - + self.lru_child_storage.used_size() - // ignore small hashes storage and self.lru_hashes.used_size() + self.lru_storage.used_size() + self.lru_child_storage.used_size() + // ignore small hashes storage and self.lru_hashes.used_size() } /// Synchronize the shared cache with the best block state. @@ -233,20 +234,16 @@ pub fn new_shared_cache( child_ratio: (usize, usize), ) -> SharedCache { let top = child_ratio.1.saturating_sub(child_ratio.0); - Arc::new( - RwLock::new( - Cache { - lru_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1 - ), - lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), - lru_child_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * child_ratio.0 / child_ratio.1 - ), - modifications: VecDeque::new(), - } - ) - ) + Arc::new(RwLock::new(Cache { + lru_storage: LRUMap(LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1), + lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), + lru_child_storage: LRUMap( + LinkedHashMap::new(), + 0, + shared_cache_size * child_ratio.0 / child_ratio.1, + ), + modifications: VecDeque::new(), + })) } #[derive(Debug)] @@ -393,16 +390,15 @@ impl CacheChanges { } } - if let ( - Some(ref number), Some(ref hash), Some(ref parent)) - = (commit_number, commit_hash, self.parent_hash) + if let (Some(ref number), Some(ref hash), Some(ref parent)) = + (commit_number, commit_hash, self.parent_hash) { if cache.modifications.len() == STATE_CACHE_BLOCKS { cache.modifications.pop_back(); } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| + child_changes.into_iter().for_each(|(sk, changes)| { for (k, v) in changes.into_iter() { let k = (sk.clone(), k); if is_best { @@ -410,7 +406,7 @@ impl CacheChanges { } child_modifications.insert(k); } - ); + }); for (k, v) in changes.into_iter() { if is_best { cache.lru_hashes.remove(&k); @@ -428,7 +424,9 @@ impl CacheChanges { is_canon: is_best, parent: parent.clone(), }; - let insert_at = cache.modifications.iter() + let insert_at = cache + .modifications + .iter() .enumerate() .find(|(_, m)| m.number < *number) .map(|(i, _)| i); @@ -471,13 +469,16 @@ impl>, B: BlockT> CachingState { key: Option<&[u8]>, child_key: Option<&ChildStorageKey>, parent_hash: &Option, - modifications: &VecDeque> + modifications: &VecDeque>, ) -> bool { let mut parent = match *parent_hash { None => { - trace!("Cache lookup skipped for {:?}: no parent hash", key.as_ref().map(HexDisplay::from)); - return false; - } + trace!( + "Cache lookup skipped for {:?}: no parent hash", + key.as_ref().map(HexDisplay::from) + ); + return false + }, Some(ref parent) => parent, }; // Ignore all storage entries modified in later blocks. 
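The `LRUMap::add` limit loop shown earlier evicts from the cold end until the estimated byte usage fits the budget. A dependency-free sketch of the same accounting — `VecDeque` stands in for `LinkedHashMap`, so there is no lookup-refresh, and `len()` stands in for `estimate_size()`:

```rust
use std::collections::VecDeque;

/// Size-bounded cache: every insert charges key + value bytes against a
/// budget, popping cold entries until the budget holds again.
struct BoundedLru {
    entries: VecDeque<(Vec<u8>, Vec<u8>)>,
    used: usize,
    limit: usize,
}

impl BoundedLru {
    fn new(limit: usize) -> Self {
        Self { entries: VecDeque::new(), used: 0, limit }
    }

    fn add(&mut self, key: Vec<u8>, value: Vec<u8>) {
        self.used += key.len() + value.len();
        self.entries.push_back((key, value));
        while self.used > self.limit {
            match self.entries.pop_front() {
                Some((k, v)) => self.used -= k.len() + v.len(),
                // Mirrors the `break` above: values can live in several LRUs
                // but are only removed from one, so this map may already be empty.
                None => break,
            }
        }
    }
}

fn main() {
    let mut lru = BoundedLru::new(8);
    lru.add(b"aa".to_vec(), b"bb".to_vec());     // 4 bytes used
    lru.add(b"cc".to_vec(), b"dddddd".to_vec()); // 12 bytes -> evicts the first entry
    assert_eq!(lru.entries.len(), 1);
    assert_eq!(lru.used, 8);
}
```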
@@ -488,20 +489,23 @@ impl>, B: BlockT> CachingState { for m in modifications { if &m.hash == parent { if m.is_canon { - return true; + return true } parent = &m.parent; } if let Some(key) = key { if m.storage.contains(key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", HexDisplay::from(&key)); - return false; + trace!( + "Cache lookup skipped for {:?}: modified in a later block", + HexDisplay::from(&key) + ); + return false } } if let Some(child_key) = child_key { if m.child_storage.contains(child_key) { trace!("Cache lookup skipped for {:?}: modified in a later block", child_key); - return false; + return false } } } @@ -540,7 +544,9 @@ impl>, B: BlockT> StateBackend> for Cachin } trace!("Cache miss: {:?}", HexDisplay::from(&key)); let value = self.state.storage(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone()); + RwLockUpgradableReadGuard::upgrade(local_cache) + .storage + .insert(key.to_vec(), value.clone()); self.usage.tally_key_read(key, value.as_ref(), false); Ok(value) } @@ -563,7 +569,9 @@ impl>, B: BlockT> StateBackend> for Cachin } trace!("Cache hash miss: {:?}", HexDisplay::from(&key)); let hash = self.state.storage_hash(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash); + RwLockUpgradableReadGuard::upgrade(local_cache) + .hashes + .insert(key.to_vec(), hash); Ok(hash) } @@ -576,9 +584,7 @@ impl>, B: BlockT> StateBackend> for Cachin let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { trace!("Found in local cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) + return Ok(self.usage.tally_child_key_read(&key, entry, true)) } { let cache = self.cache.shared_cache.upgradable_read(); @@ -586,9 +592,7 @@ impl>, B: BlockT> StateBackend> for Cachin let mut cache = RwLockUpgradableReadGuard::upgrade(cache); if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) { trace!("Found in shared cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) + return Ok(self.usage.tally_child_key_read(&key, entry, true)) } } } @@ -596,9 +600,11 @@ impl>, B: BlockT> StateBackend> for Cachin let value = self.state.child_storage(child_info, &key.1[..])?; // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); + let value = self.usage.tally_child_key_read(&key, value, false); - RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone()); + RwLockUpgradableReadGuard::upgrade(local_cache) + .child_storage + .insert(key, value.clone()); Ok(value) } @@ -622,7 +628,8 @@ impl>, B: BlockT> StateBackend> for Cachin f: F, allow_missing: bool, ) -> Result { - self.state.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + self.state + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) } fn apply_to_keys_while bool>( @@ -665,16 +672,22 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, 
Self::Transaction) + where + B::Hash: Ord, + { self.state.child_storage_root(child_info, delta) } @@ -686,11 +699,7 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.state.child_keys(child_info, prefix) } @@ -739,13 +748,7 @@ impl SyncingCachingState { meta: Arc, B::Hash>>>, lock: Arc>, ) -> Self { - Self { - caching_state: Some(caching_state), - state_usage, - meta, - lock, - disable_syncing: false, - } + Self { caching_state: Some(caching_state), state_usage, meta, lock, disable_syncing: false } } /// Returns the reference to the internal [`CachingState`]. @@ -775,7 +778,9 @@ impl std::fmt::Debug for SyncingCachingState { } } -impl>, B: BlockT> StateBackend> for SyncingCachingState { +impl>, B: BlockT> StateBackend> + for SyncingCachingState +{ type Error = S::Error; type Transaction = S::Transaction; type TrieBackendStorage = S::TrieBackendStorage; @@ -816,7 +821,13 @@ impl>, B: BlockT> StateBackend> for Syncin f: F, allow_missing: bool, ) -> Result { - self.caching_state().apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + self.caching_state().apply_to_key_values_while( + child_info, + prefix, + start_at, + f, + allow_missing, + ) } fn apply_to_keys_while bool>( @@ -859,16 +870,22 @@ impl>, B: BlockT> StateBackend> for Syncin fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, Self::Transaction) + where + B::Hash: Ord, + { self.caching_state().storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { + delta: impl Iterator)>, + ) -> (B::Hash, bool, Self::Transaction) + where + B::Hash: Ord, + { self.caching_state().child_storage_root(child_info, delta) } @@ -880,11 +897,7 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.caching_state().child_keys(child_info, prefix) } @@ -907,7 +920,7 @@ impl>, B: BlockT> StateBackend> for Syncin impl Drop for SyncingCachingState { fn drop(&mut self) { if self.disable_syncing { - return; + return } if let Some(mut caching_state) = self.caching_state.take() { @@ -926,8 +939,8 @@ impl Drop for SyncingCachingState { mod tests { use super::*; use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; use sp_state_machine::InMemoryBackend; @@ -935,7 +948,7 @@ mod tests { #[test] fn smoke() { - //init_log(); + // init_log(); let root_parent = H256::random(); let key = H256::random()[..].to_vec(); let h0 = H256::random(); @@ -965,18 +978,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache( &[], &[], @@ -987,11 +994,8 @@ mod 
tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); s.cache.sync_cache( &[], &[], @@ -1002,11 +1006,8 @@ mod tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); s.cache.sync_cache( &[], &[], @@ -1017,48 +1018,30 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1a)); assert!(s.storage(&key).unwrap().is_none()); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); assert!(s.storage(&key).unwrap().is_none()); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1b)); assert!(s.storage(&key).unwrap().is_none()); // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); s.cache.sync_cache( &[h1b, h2b, h3b], &[h1a, h2a, h3a], @@ -1068,11 +1051,8 @@ mod tests { Some(3), true, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert!(s.storage(&key).unwrap().is_none()); } @@ -1087,7 +1067,7 @@ mod tests { let h2b = H256::random(); let h3b = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( InMemoryBackend::::default(), @@ -1104,18 +1084,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache( &[], &[], @@ -1126,11 +1100,8 @@ mod tests { false, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); s.cache.sync_cache( &[], &[], @@ -1141,11 +1112,8 @@ mod tests { false, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let s = + 
CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } @@ -1159,7 +1127,7 @@ mod tests { let h3a = H256::random(); let h3b = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( InMemoryBackend::::default(), @@ -1168,18 +1136,12 @@ mod tests { ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2a)); s.cache.sync_cache( &[], &[], @@ -1190,18 +1152,12 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h2b)); s.cache.sync_cache( &[], &[], @@ -1212,11 +1168,8 @@ mod tests { false, ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h3a)); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } @@ -1227,15 +1180,11 @@ mod tests { let h1a = H256::random(); let h1b = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut backend = InMemoryBackend::::default(); backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); - let mut s = CachingState::new( - backend.clone(), - shared.clone(), - Some(root_parent), - ); + let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); s.cache.sync_cache( &[], &[], @@ -1246,29 +1195,23 @@ mod tests { true, ); - let mut s = CachingState::new( - backend.clone(), - shared.clone(), - Some(root_parent), - ); + let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); s.cache.sync_cache(&[], &[h1a], vec![], vec![], Some(h1b), Some(1), true); - let s = CachingState::new( - backend.clone(), - shared.clone(), - Some(h1b), - ); + let s = CachingState::new(backend.clone(), shared.clone(), Some(h1b)); assert_eq!(s.storage_hash(&key).unwrap().unwrap(), BlakeTwo256::hash(&vec![1])); } #[test] fn should_track_used_size_correctly() { let root_parent = H256::random(); - let shared = new_shared_cache::(109, ((109-36), 109)); + let shared = new_shared_cache::(109, ((109 - 36), 109)); let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1302,7 +1245,7 @@ mod tests { #[test] fn should_remove_lru_items_based_on_tracking_used_size() { let root_parent = H256::random(); - let shared = new_shared_cache::(36*3, (2,3)); + let shared = new_shared_cache::(36 * 3, (2, 
3)); let h0 = H256::random(); let mut s = CachingState::new( @@ -1364,11 +1307,8 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h0)); s.cache.sync_cache( &[], &[], @@ -1379,11 +1319,8 @@ mod tests { true, ); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); // Restart (or unknown block?), clear caches. @@ -1402,11 +1339,8 @@ mod tests { // New value is propagated. s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); assert_eq!(s.storage(&key).unwrap(), None); } @@ -1419,7 +1353,7 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( InMemoryBackend::::default(), @@ -1437,11 +1371,8 @@ mod tests { ); assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); // commit as non-best s.cache.sync_cache( @@ -1456,36 +1387,25 @@ mod tests { assert_eq!(shared.write().lru_storage.get(&key).unwrap(), &Some(vec![1])); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); + let mut s = + CachingState::new(InMemoryBackend::::default(), shared.clone(), Some(h1)); // commit again as best with no changes - s.cache.sync_cache( - &[], - &[], - vec![], - vec![], - Some(h2), - Some(2), - true, - ); + s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2), Some(2), true); assert_eq!(s.storage(&key).unwrap(), None); } } #[cfg(test)] mod qc { - use std::collections::{HashMap, hash_map::Entry}; + use std::collections::{hash_map::Entry, HashMap}; - use quickcheck::{quickcheck, TestResult, Arbitrary}; + use quickcheck::{quickcheck, Arbitrary, TestResult}; use super::*; use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, }; use sp_state_machine::InMemoryBackend; @@ -1507,28 +1427,24 @@ mod qc { fn new_next(&self, hash: H256, changes: KeySet) -> Self { let mut state = self.state.clone(); - for (k, v) in self.state.iter() { state.insert(k.clone(), v.clone()); } - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent: self.hash, - changes, - state, + for (k, v) in self.state.iter() { + state.insert(k.clone(), v.clone()); } + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); + } + + Self { hash, parent: self.hash, changes, state } } fn new(hash: H256, parent: H256, changes: KeySet) -> Self { let mut state = KeyMap::new(); - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent, - state, - changes, + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); } + + Self { hash, parent, state, changes } } fn purge(&mut self, other_changes: &KeySet) { @@ -1552,30 +1468,26 @@ mod qc { let buf = 
(0..32).map(|_| u8::arbitrary(gen)).collect::>(); match path { - 0..=175 => { - Action::Next { - hash: H256::from_slice(&buf[..]), - changes: { - let mut set = Vec::new(); - for _ in 0..::arbitrary(gen)/(64*256*256*256) { - set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); - } - set + 0..=175 => Action::Next { + hash: H256::from_slice(&buf[..]), + changes: { + let mut set = Vec::new(); + for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } - } + set + }, }, - 176..=220 => { - Action::Fork { - hash: H256::from_slice(&buf[..]), - depth: ((u8::arbitrary(gen)) / 32) as usize, - changes: { - let mut set = Vec::new(); - for _ in 0..::arbitrary(gen)/(64*256*256*256) { - set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); - } - set + 176..=220 => Action::Fork { + hash: H256::from_slice(&buf[..]), + depth: ((u8::arbitrary(gen)) / 32) as usize, + changes: { + let mut set = Vec::new(); + for _ in 0..::arbitrary(gen) / (64 * 256 * 256 * 256) { + set.push((vec![u8::arbitrary(gen)], Some(vec![u8::arbitrary(gen)]))); } - } + set + }, }, 221..=240 => { Action::ReorgWithImport { @@ -1586,7 +1498,7 @@ mod qc { _ => { Action::FinalizationReorg { fork_depth: ((u8::arbitrary(gen)) / 32) as usize, // 0-7 - depth: ((u8::arbitrary(gen)) / 64) as usize, // 0-3 + depth: ((u8::arbitrary(gen)) / 64) as usize, // 0-3 } }, } @@ -1601,13 +1513,9 @@ mod qc { impl Mutator { fn new_empty() -> Self { - let shared = new_shared_cache::(256*1024, (0,1)); + let shared = new_shared_cache::(256 * 1024, (0, 1)); - Self { - shared, - canon: vec![], - forks: HashMap::new(), - } + Self { shared, canon: vec![], forks: HashMap::new() } } fn head_state(&self, hash: H256) -> CachingState, Block> { @@ -1626,11 +1534,12 @@ mod qc { &mut self, action: Action, ) -> CachingState, Block> { - self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") + self.mutate(action) + .expect("Expected to provide only valid actions to the mutate_static") } fn canon_len(&self) -> usize { - return self.canon.len(); + return self.canon.len() } fn head_storage_ref(&self) -> &KeyMap { @@ -1648,10 +1557,10 @@ mod qc { let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len()-1) as isize + if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len() - 1) as isize // no fork on top also, thus len-1 { - return Err(()); + return Err(()) } let pos = pos as usize; @@ -1661,7 +1570,8 @@ mod qc { let (total_h, parent) = match self.forks.entry(fork_at) { Entry::Occupied(occupied) => { let chain = occupied.into_mut(); - let parent = chain.last().expect("No empty forks are ever created").clone(); + let parent = + chain.last().expect("No empty forks are ever created").clone(); let mut node = parent.new_next(hash, changes.clone()); for earlier in chain.iter() { @@ -1677,7 +1587,7 @@ mod qc { vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]); (pos + 1, fork_at) - } + }, }; let mut state = CachingState::new( @@ -1704,9 +1614,7 @@ mod qc { let parent_hash = H256::from(&[0u8; 32]); (Node::new(hash, parent_hash, changes.clone()), parent_hash) }, - Some(parent) => { - (parent.new_next(hash, changes.clone()), parent.hash) - } + Some(parent) => (parent.new_next(hash, changes.clone()), parent.hash), }; // delete cache entries for earlier @@ -1741,22 +1649,26 @@ mod qc { }, 
Action::ReorgWithImport { depth, hash } => { let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); } + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()) + } let fork_at = self.canon[pos as usize].hash; let pos = pos as usize; match self.forks.get_mut(&fork_at) { Some(chain) => { - let mut new_fork = self.canon.drain(pos+1..).collect::>(); + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); let enacted: Vec = chain.iter().map(|node| node.hash).collect(); std::mem::swap(chain, &mut new_fork); - let mut node = new_fork.last().map( - |node| node.new_next(hash, vec![]) - ).expect("No empty fork ever created!"); + let mut node = new_fork + .last() + .map(|node| node.new_next(hash, vec![])) + .expect("No empty fork ever created!"); for invalidators in chain.iter().chain(new_fork.iter()) { node.purge(&invalidators.changes); @@ -1784,44 +1696,54 @@ mod qc { ); state - } + }, None => { - return Err(()); // no reorg without a fork atm! + return Err(()) // no reorg without a fork atm! }, } }, Action::FinalizationReorg { fork_depth, depth } => { let pos = self.canon.len() as isize - fork_depth as isize; - if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); } + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()) + } let fork_at = self.canon[pos as usize].hash; let pos = pos as usize; match self.forks.get_mut(&fork_at) { Some(fork_chain) => { - let sync_pos = fork_chain.len() as isize - fork_chain.len() as isize - depth as isize; - if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { return Err (()); } + let sync_pos = fork_chain.len() as isize - + fork_chain.len() as isize - depth as isize; + if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { + return Err(()) + } let sync_pos = sync_pos as usize; - let mut new_fork = self.canon.drain(pos+1..).collect::>(); + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = fork_chain.iter().take(sync_pos+1).map(|node| node.hash).collect(); + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); + let enacted: Vec = fork_chain + .iter() + .take(sync_pos + 1) + .map(|node| node.hash) + .collect(); std::mem::swap(fork_chain, &mut new_fork); self.shared.write().sync(&retracted, &enacted); self.head_state( - self.canon.last() - .expect("wasn't forking to emptiness so there should be one!") - .hash + self.canon + .last() + .expect("wasn't forking to emptiness so there should be one!") + .hash, ) }, None => { - return Err(()); // no reorg to nothing pls! - } + return Err(()) // no reorg to nothing pls! 
+ }, } - }, }; @@ -1841,14 +1763,27 @@ mod qc { let h3b = H256::random(); let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); + mutator + .mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); mutator.mutate_static(Action::Next { hash: h1a, changes: vec![] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![(key.clone(), Some(vec![4]))] }); - mutator.mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h2b, + changes: vec![(key.clone(), Some(vec![4]))], + }); + mutator + .mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); mutator.mutate_static(Action::Next { hash: h3a, changes: vec![] }); - assert_eq!(mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), vec![5]); + assert_eq!( + mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), + vec![5] + ); assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none()); assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none()); assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none()); @@ -1862,18 +1797,17 @@ mod qc { for key in Mutator::key_permutations() { match (head_state.storage(&key).unwrap(), mutator.head_storage_ref().get(&key)) { - (Some(x), Some(y)) => { + (Some(x), Some(y)) => if Some(&x) != y.as_ref() { eprintln!("{:?} != {:?}", x, y); - return false; - } - }, + return false + }, (None, Some(_y)) => { // TODO: cache miss is not tracked atm }, (Some(x), None) => { eprintln!("{:?} != ", x); - return false; + return false }, _ => continue, } @@ -1886,18 +1820,17 @@ mod qc { let head_state = mutator.head_state(node.hash); for key in Mutator::key_permutations() { match (head_state.storage(&key).unwrap(), node.state.get(&key)) { - (Some(x), Some(y)) => { + (Some(x), Some(y)) => if Some(&x) != y.as_ref() { eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y); - return false; - } - }, + return false + }, (None, Some(_y)) => { // cache miss is not tracked atm }, (Some(x), None) => { eprintln!("at [{}]: {:?} != ", node.hash, x); - return false; + return false }, _ => continue, } @@ -1918,16 +1851,27 @@ mod qc { let mut mutator = Mutator::new_empty(); mutator.mutate_static(Action::Next { hash: h0, changes: vec![] }); mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); + mutator + .mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); mutator.mutate_static(Action::ReorgWithImport { depth: 2, hash: h2b }); assert!(is_head_match(&mutator)) } - fn key(k: u8) -> Vec { vec![k] } - fn val(v: u8) -> Option> { Some(vec![v]) } - fn keyval(k: u8, v: u8) -> KeySet { vec![(key(k), val(v))] } + fn key(k: u8) -> Vec { + vec![k] + } + fn val(v: u8) -> Option> { + Some(vec![v]) + } + fn keyval(k: u8, v: u8) -> KeySet { + vec![(key(k), 
val(v))] + } #[test] fn reorg2() { @@ -1941,7 +1885,7 @@ mod qc { let mut mutator = Mutator::new_empty(); mutator.mutate_static(Action::Next { hash: h0, changes: keyval(1, 1) }); mutator.mutate_static(Action::Next { hash: h1a, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2 ) }); + mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2) }); mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(3, 3) }); mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(4, 4) }); diff --git a/substrate/client/db/src/upgrade.rs b/substrate/client/db/src/upgrade.rs index ea91b8253e1d8aa81ab3c9db6861f442da124619..fe0abaed1b07a346d9aaa9316c423c506c466a91 100644 --- a/substrate/client/db/src/upgrade.rs +++ b/substrate/client/db/src/upgrade.rs @@ -18,14 +18,16 @@ //! Database upgrade logic. -use std::fs; -use std::io::{Read, Write, ErrorKind}; -use std::path::{Path, PathBuf}; +use std::{ + fs, + io::{ErrorKind, Read, Write}, + path::{Path, PathBuf}, +}; -use sp_runtime::traits::Block as BlockT; use crate::{columns, utils::DatabaseType}; -use kvdb_rocksdb::{Database, DatabaseConfig}; use codec::{Decode, Encode}; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use sp_runtime::traits::Block as BlockT; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; @@ -38,19 +40,28 @@ const V1_NUM_COLUMNS: u32 = 11; const V2_NUM_COLUMNS: u32 = 12; /// Upgrade database to current version. -pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { +pub fn upgrade_db( + db_path: &Path, + db_type: DatabaseType, +) -> sp_blockchain::Result<()> { let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); if !is_empty { let db_version = current_version(db_path)?; match db_version { - 0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?, + 0 => Err(sp_blockchain::Error::Backend(format!( + "Unsupported database version: {}", + db_version + )))?, 1 => { migrate_1_to_2::(db_path, db_type)?; migrate_2_to_3::(db_path, db_type)? }, 2 => migrate_2_to_3::(db_path, db_type)?, CURRENT_VERSION => (), - _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, + _ => Err(sp_blockchain::Error::Backend(format!( + "Future database version: {}", + db_version + )))?, } } @@ -60,8 +71,12 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_bl /// Migration from version1 to version2: /// 1) the number of columns has changed from 11 to 12; /// 2) transactions column is added; -fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { - let db_path = db_path.to_str() +fn migrate_1_to_2( + db_path: &Path, + _db_type: DatabaseType, +) -> sp_blockchain::Result<()> { + let db_path = db_path + .to_str() .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; let db_cfg = DatabaseConfig::with_columns(V1_NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).map_err(db_err)?; @@ -70,8 +85,12 @@ fn migrate_1_to_2(db_path: &Path, _db_type: DatabaseType) -> sp_b /// Migration from version2 to version3: /// - The format of the stored Justification changed to support multiple Justifications. 
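As the `upgrade_db` hunk above shows, migrations are cumulative: a version-1 database runs both steps, a version-2 database only the second, and a current database is a no-op. A standalone sketch of that dispatch shape, with the migrations stubbed out and a plain `String` error standing in for `sp_blockchain::Error`:

    // Sketch only: the cumulative version ladder of `upgrade_db`; stubs, not patch code.
    const CURRENT_VERSION: u32 = 3;

    fn migrate_1_to_2() -> Result<(), String> { Ok(()) } // stub
    fn migrate_2_to_3() -> Result<(), String> { Ok(()) } // stub

    fn upgrade(db_version: u32) -> Result<(), String> {
        match db_version {
            0 => Err(format!("Unsupported database version: {}", db_version)),
            1 => { migrate_1_to_2()?; migrate_2_to_3() },
            2 => migrate_2_to_3(),
            CURRENT_VERSION => Ok(()),
            _ => Err(format!("Future database version: {}", db_version)),
        }
    }

    fn main() { assert!(upgrade(CURRENT_VERSION).is_ok()); }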
-fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { - let db_path = db_path.to_str() +fn migrate_2_to_3( + db_path: &Path, + _db_type: DatabaseType, +) -> sp_blockchain::Result<()> { + let db_path = db_path + .to_str() .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).map_err(db_err)?; @@ -137,10 +156,11 @@ fn version_file_path(path: &Path) -> PathBuf { #[cfg(test)] mod tests { - use sc_state_db::PruningMode; - use crate::{DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, TransactionStorageMode}; - use crate::tests::Block; use super::*; + use crate::{ + tests::Block, DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, TransactionStorageMode, + }; + use sc_state_db::PruningMode; fn create_db(db_path: &Path, version: Option) { if let Some(version) = version { @@ -151,14 +171,18 @@ mod tests { } fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { - crate::utils::open_database::(&DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - state_pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - }, DatabaseType::Full).map(|_| ()) + crate::utils::open_database::( + &DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + }, + DatabaseType::Full, + ) + .map(|_| ()) } #[test] diff --git a/substrate/client/db/src/utils.rs b/substrate/client/db/src/utils.rs index bd6dc9841aa6373cde0da4fe86be1dfb0f0ee166..fc2324f35af6794ab05229f80717d0cdd1b7a69f 100644 --- a/substrate/client/db/src/utils.rs +++ b/substrate/client/db/src/utils.rs @@ -19,24 +19,27 @@ //! Db-based backend utility structures and functions, used by both //! full and light storages. -use std::sync::Arc; -use std::convert::TryInto; +use std::{convert::TryInto, sync::Arc}; use log::debug; +use crate::{Database, DatabaseSettings, DatabaseSettingsSrc, DbHash}; use codec::Decode; -use sp_trie::DBValue; use sp_database::Transaction; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Zero, - UniqueSaturatedFrom, UniqueSaturatedInto, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero}, }; -use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; +use sp_trie::DBValue; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -#[cfg(any(feature = "with-kvdb-rocksdb", feature = "with-parity-db", feature = "test-helpers", test))] +#[cfg(any( + feature = "with-kvdb-rocksdb", + feature = "with-parity-db", + feature = "test-helpers", + test +))] pub const NUM_COLUMNS: u32 = 12; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; @@ -98,24 +101,17 @@ pub enum DatabaseType { /// In the current database schema, this kind of key is only used for /// lookups into an index, NOT for storing header data or others. 
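The short lookup key described here (and built by `number_index_key` just below) is simply the block number in big-endian byte order, so keys sort in block order under byte-wise comparison. A quick self-contained check of that layout; the sample values are assumptions for illustration:

    // Sketch only: same byte layout as `number_index_key` below, fixed to u32 input.
    fn be_key(n: u32) -> [u8; 4] {
        [(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8]
    }

    fn main() {
        assert_eq!(be_key(255), [0x00, 0x00, 0x00, 0xff]);
        assert_eq!(be_key(65_536), [0x00, 0x01, 0x00, 0x00]);
        // Big-endian keys preserve numeric order under lexicographic comparison.
        assert!(be_key(255) < be_key(65_536));
    }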
pub fn number_index_key>(n: N) -> sp_blockchain::Result { - let n = n.try_into().map_err(|_| + let n = n.try_into().map_err(|_| { sp_blockchain::Error::Backend("Block number cannot be converted to u32".into()) - )?; - - Ok([ - (n >> 24) as u8, - ((n >> 16) & 0xff) as u8, - ((n >> 8) & 0xff) as u8, - (n & 0xff) as u8 - ]) + })?; + + Ok([(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8]) } /// Convert number and hash into long lookup key for blocks that are /// not in the canonical chain. -pub fn number_and_hash_to_lookup_key( - number: N, - hash: H, -) -> sp_blockchain::Result> where +pub fn number_and_hash_to_lookup_key(number: N, hash: H) -> sp_blockchain::Result> +where N: TryInto, H: AsRef<[u8]>, { @@ -126,16 +122,15 @@ pub fn number_and_hash_to_lookup_key( /// Convert block lookup key into block number. /// all block lookup keys start with the block number. -pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result where - N: From +pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result +where + N: From, { if key.len() < 4 { - return Err(sp_blockchain::Error::Backend("Invalid block key".into())); + return Err(sp_blockchain::Error::Backend("Invalid block key".into())) } - Ok((key[0] as u32) << 24 - | (key[1] as u32) << 16 - | (key[2] as u32) << 8 - | (key[3] as u32)).map(Into::into) + Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32)) + .map(Into::into) } /// Delete number to hash mapping in DB transaction. @@ -197,17 +192,15 @@ pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( pub fn block_id_to_lookup_key( db: &dyn Database, key_lookup_col: u32, - id: BlockId -) -> Result>, sp_blockchain::Error> where + id: BlockId, +) -> Result>, sp_blockchain::Error> +where Block: BlockT, ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, { Ok(match id { - BlockId::Number(n) => db.get( - key_lookup_col, - number_index_key(n)?.as_ref(), - ), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()) + BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n)?.as_ref()), + BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), }) } @@ -218,9 +211,10 @@ pub fn open_database( ) -> sp_blockchain::Result>> { #[allow(unused)] fn db_open_error(feat: &'static str) -> sp_blockchain::Error { - sp_blockchain::Error::Backend( - format!("`{}` feature not enabled, database can not be opened", feat), - ) + sp_blockchain::Error::Backend(format!( + "`{}` feature not enabled, database can not be opened", + feat + )) } let db: Arc> = match &config.source { @@ -231,14 +225,16 @@ pub fn open_database( // and now open database assuming that it has the latest version let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); - let path = path.to_str() + let path = path + .to_str() .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; let mut memory_budget = std::collections::HashMap::new(); match db_type { DatabaseType::Full => { let state_col_budget = (*cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + let other_col_budget = + (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); for i in 0..NUM_COLUMNS { if i == crate::columns::STATE { @@ -267,7 +263,7 @@ pub fn open_database( path, col_budget, ); - } + }, } db_config.memory_budget = memory_budget; @@ -276,18 +272,12 @@ pub fn open_database( sp_database::as_database(db) }, #[cfg(not(any(feature = "with-kvdb-rocksdb", 
test)))] - DatabaseSettingsSrc::RocksDb { .. } => { - return Err(db_open_error("with-kvdb-rocksdb")); - }, + DatabaseSettingsSrc::RocksDb { .. } => return Err(db_open_error("with-kvdb-rocksdb")), #[cfg(feature = "with-parity-db")] - DatabaseSettingsSrc::ParityDb { path } => { - crate::parity_db::open(&path, db_type) - .map_err(|e| sp_blockchain::Error::Backend(format!("{}", e)))? - }, + DatabaseSettingsSrc::ParityDb { path } => crate::parity_db::open(&path, db_type) + .map_err(|e| sp_blockchain::Error::Backend(format!("{}", e)))?, #[cfg(not(feature = "with-parity-db"))] - DatabaseSettingsSrc::ParityDb { .. } => { - return Err(db_open_error("with-parity-db")) - }, + DatabaseSettingsSrc::ParityDb { .. } => return Err(db_open_error("with-parity-db")), DatabaseSettingsSrc::Custom(db) => db.clone(), }; @@ -297,14 +287,19 @@ pub fn open_database( } /// Check database type. -pub fn check_database_type(db: &dyn Database, db_type: DatabaseType) -> sp_blockchain::Result<()> { +pub fn check_database_type( + db: &dyn Database, + db_type: DatabaseType, +) -> sp_blockchain::Result<()> { match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => { + Some(stored_type) => if db_type.as_str().as_bytes() != &*stored_type { - return Err(sp_blockchain::Error::Backend( - format!("Unexpected database type. Expected: {}", db_type.as_str())).into()); - } - }, + return Err(sp_blockchain::Error::Backend(format!( + "Unexpected database type. Expected: {}", + db_type.as_str() + )) + .into()) + }, None => { let mut transaction = Transaction::new(); transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); @@ -320,10 +315,10 @@ pub fn read_db( db: &dyn Database, col_index: u32, col: u32, - id: BlockId + id: BlockId, ) -> sp_blockchain::Result> - where - Block: BlockT, +where + Block: BlockT, { block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { Some(key) => Ok(db.get(col, key.as_ref())), @@ -358,10 +353,8 @@ pub fn read_header( match read_db(db, col_index, col, id)? { Some(header) => match Block::Header::decode(&mut &header[..]) { Ok(header) => Ok(Some(header)), - Err(_) => return Err( - sp_blockchain::Error::Backend("Error decoding header".into()) - ), - } + Err(_) => return Err(sp_blockchain::Error::Backend("Error decoding header".into())), + }, None => Ok(None), } } @@ -373,34 +366,35 @@ pub fn require_header( col: u32, id: BlockId, ) -> sp_blockchain::Result { - read_header(db, col_index, col, id) - .and_then(|header| header.ok_or_else(|| - sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id)) - )) + read_header(db, col_index, col, id).and_then(|header| { + header.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id))) + }) } /// Read meta from the database. -pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< - Meta<<::Header as HeaderT>::Number, Block::Hash>, - sp_blockchain::Error, -> - where - Block: BlockT, +pub fn read_meta( + db: &dyn Database, + col_header: u32, +) -> Result::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error> +where + Block: BlockT, { let genesis_hash: Block::Hash = match read_genesis_hash(db)? 
{ Some(genesis_hash) => genesis_hash, - None => return Ok(Meta { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - finalized_state: None, - }), + None => + return Ok(Meta { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + finalized_state: None, + }), }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = db.get(COLUMN_META, key) + if let Some(Some(header)) = db + .get(COLUMN_META, key) .and_then(|id| db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok())) { let hash = header.hash(); @@ -419,7 +413,8 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; - let (finalized_state_hash, finalized_state_number) = load_meta_block("final_state", meta_keys::FINALIZED_STATE)?; + let (finalized_state_hash, finalized_state_number) = + load_meta_block("final_state", meta_keys::FINALIZED_STATE)?; let finalized_state = if finalized_state_hash != Default::default() { Some((finalized_state_hash, finalized_state_number)) } else { @@ -437,13 +432,14 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< } /// Read genesis hash from database. -pub fn read_genesis_hash(db: &dyn Database) -> sp_blockchain::Result> { +pub fn read_genesis_hash( + db: &dyn Database, +) -> sp_blockchain::Result> { match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { Some(h) => match Decode::decode(&mut &h[..]) { Ok(h) => Ok(Some(h)), - Err(err) => Err(sp_blockchain::Error::Backend( - format!("Error decoding genesis hash: {}", err) - )), + Err(err) => + Err(sp_blockchain::Error::Backend(format!("Error decoding genesis hash: {}", err))), }, None => Ok(None), } @@ -461,7 +457,7 @@ impl DatabaseType { pub(crate) struct JoinInput<'a, 'b>(&'a [u8], &'b [u8]); -pub(crate) fn join_input<'a, 'b>(i1: &'a[u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { +pub(crate) fn join_input<'a, 'b>(i1: &'a [u8], i2: &'b [u8]) -> JoinInput<'a, 'b> { JoinInput(i1, i2) } @@ -486,8 +482,8 @@ impl<'a, 'b> codec::Input for JoinInput<'a, 'b> { #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; use codec::Input; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; type Block = RawBlock>; #[test] diff --git a/substrate/client/executor/common/src/lib.rs b/substrate/client/executor/common/src/lib.rs index 25e06314aba3964e2498e45b9575c2351524091c..ef73ecd90e2853a83739df055fb6634ca9e77a86 100644 --- a/substrate/client/executor/common/src/lib.rs +++ b/substrate/client/executor/common/src/lib.rs @@ -22,6 +22,6 @@ #![deny(unused_crate_dependencies)] pub mod error; +pub mod runtime_blob; pub mod sandbox; pub mod wasm_runtime; -pub mod runtime_blob; diff --git a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs index 269ad0858325b066ebda673e15b26249e1df7b1d..5c3fedbdc963edb3771c8a0b0851f0b75d3a018e 100644 --- a/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs +++ b/substrate/client/executor/common/src/runtime_blob/data_segments_snapshot.rs @@ -16,10 +16,10 @@ // You should have 
received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. -use crate::error::{self, Error}; use super::RuntimeBlob; -use std::mem; +use crate::error::{self, Error}; use pwasm_utils::parity_wasm::elements::Instruction; +use std::mem; /// This is a snapshot of data segments specialized for a particular instantiation. /// @@ -49,7 +49,7 @@ impl DataSegmentsSnapshot { // [op, End] if init_expr.len() != 2 { - return Err(Error::InitializerHasTooManyExpressions); + return Err(Error::InitializerHasTooManyExpressions) } let offset = match &init_expr[0] { Instruction::I32Const(v) => *v as u32, @@ -60,8 +60,8 @@ impl DataSegmentsSnapshot { // At the moment of writing the Substrate Runtime Interface does not provide // any globals. There is nothing that prevents us from supporting this // if/when we gain those. - return Err(Error::ImportedGlobalsUnsupported); - } + return Err(Error::ImportedGlobalsUnsupported) + }, insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), }; diff --git a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs b/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs index acdefef2e64ef854644bbd38d2522719665ac608..6a29ff8bae365aa7d551c446d28695af241b04c0 100644 --- a/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs +++ b/substrate/client/executor/common/src/runtime_blob/globals_snapshot.rs @@ -50,17 +50,14 @@ pub trait InstanceGlobals { /// a runtime blob that was instrumented by /// [`RuntimeBlob::expose_mutable_globals`](super::RuntimeBlob::expose_mutable_globals`). -/// /// If the code wasn't instrumented then it would be empty and snapshot would do nothing. pub struct ExposedMutableGlobalsSet(Vec<String>); impl ExposedMutableGlobalsSet { /// Collect the set from the given runtime blob. See the struct documentation for details. pub fn collect(runtime_blob: &RuntimeBlob) -> Self { - let global_names = runtime_blob - .exported_internal_global_names() - .map(ToOwned::to_owned) - .collect(); + let global_names = + runtime_blob.exported_internal_global_names().map(ToOwned::to_owned).collect(); Self(global_names) } } diff --git a/substrate/client/executor/common/src/runtime_blob/mod.rs b/substrate/client/executor/common/src/runtime_blob/mod.rs index 372df7bd97eb73ab64d46d0beac85f6416edac44..43d6e5e7a0dfb859e252b3df05df2713c6fa8be9 100644 --- a/substrate/client/executor/common/src/runtime_blob/mod.rs +++ b/substrate/client/executor/common/src/runtime_blob/mod.rs @@ -53,5 +53,5 @@ mod globals_snapshot; mod runtime_blob; pub use data_segments_snapshot::DataSegmentsSnapshot; -pub use globals_snapshot::{GlobalsSnapshot, ExposedMutableGlobalsSet, InstanceGlobals}; +pub use globals_snapshot::{ExposedMutableGlobalsSet, GlobalsSnapshot, InstanceGlobals}; pub use runtime_blob::RuntimeBlob; diff --git a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs index e7fc15bb13e19d90582b3eabdcb532b47f5f05d6..b7f71193449cb2f298756dfd3b3a9b8c2a273cbb 100644 --- a/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs +++ b/substrate/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -16,13 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>.
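To situate the `RuntimeBlob` methods reformatted below, a hedged usage sketch of the deserialize, instrument, serialize round trip; `RuntimeBlob::new` is assumed from the wider module (only `expose_mutable_globals` and `serialize` appear in this diff):

    // Sketch only: assumes `RuntimeBlob::new` exists alongside the methods shown below.
    use sc_executor_common::{error::WasmError, runtime_blob::RuntimeBlob};

    fn instrument(wasm: &[u8]) -> Result<Vec<u8>, WasmError> {
        let mut blob = RuntimeBlob::new(wasm)?; // parses via deserialize_buffer internally
        blob.expose_mutable_globals(); // export mutable globals for later snapshotting
        Ok(blob.serialize()) // consume the blob and re-serialize the module
    }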
+use crate::error::WasmError; use pwasm_utils::{ - parity_wasm::elements::{ - DataSegment, Module, deserialize_buffer, serialize, Internal, - }, export_mutable_globals, + parity_wasm::elements::{deserialize_buffer, serialize, DataSegment, Internal, Module}, }; -use crate::error::WasmError; /// A bunch of information collected from a WebAssembly module. #[derive(Clone)] @@ -53,11 +51,7 @@ impl RuntimeBlob { /// Extract the data segments from the given wasm code. pub(super) fn data_segments(&self) -> Vec<DataSegment> { - self.raw_module - .data_section() - .map(|ds| ds.entries()) - .unwrap_or(&[]) - .to_vec() + self.raw_module.data_section().map(|ds| ds.entries()).unwrap_or(&[]).to_vec() } /// The number of globals defined locally in this module. @@ -70,10 +64,7 @@ impl RuntimeBlob { /// The number of imports of globals. pub fn imported_globals_count(&self) -> u32 { - self.raw_module - .import_section() - .map(|is| is.globals() as u32) - .unwrap_or(0) + self.raw_module.import_section().map(|is| is.globals() as u32).unwrap_or(0) } /// Perform an instrumentation that makes sure that the mutable globals are exported. @@ -95,35 +86,29 @@ impl RuntimeBlob { |e| WasmError::Other(format!("cannot inject the stack limiter: {:?}", e)), )?; - Ok(Self { - raw_module: injected_module, - }) + Ok(Self { raw_module: injected_module }) } /// Check whether a specific function `entry_point` is exported. pub fn entry_point_exists(&self, entry_point: &str) -> bool { - self.raw_module.export_section().map(|e| { - e.entries() - .iter() - .any(|e| matches!(e.internal(), Internal::Function(_)) && e.field() == entry_point) - }).unwrap_or_default() + self.raw_module + .export_section() + .map(|e| { + e.entries().iter().any(|e| { + matches!(e.internal(), Internal::Function(_)) && e.field() == entry_point + }) + }) + .unwrap_or_default() } /// Returns an iterator of all globals which were exported by [`expose_mutable_globals`]. pub(super) fn exported_internal_global_names<'module>( &'module self, ) -> impl Iterator<Item = &'module str> { - let exports = self - .raw_module - .export_section() - .map(|es| es.entries()) - .unwrap_or(&[]); + let exports = self.raw_module.export_section().map(|es| es.entries()).unwrap_or(&[]); exports.iter().filter_map(|export| match export.internal() { - Internal::Global(_) - if export.field().starts_with("exported_internal_global") => - { - Some(export.field()) - } + Internal::Global(_) if export.field().starts_with("exported_internal_global") => + Some(export.field()), _ => None, }) } @@ -135,12 +120,11 @@ impl RuntimeBlob { .custom_sections() .find(|cs| cs.name() == section_name) .map(|cs| cs.payload()) - } + } /// Consumes this runtime blob and serializes it. pub fn serialize(self) -> Vec<u8> { - serialize(self.raw_module) - .expect("serializing into a vec should succeed; qed") + serialize(self.raw_module).expect("serializing into a vec should succeed; qed") } /// Destructure this structure into the underlying parity-wasm Module. diff --git a/substrate/client/executor/common/src/sandbox.rs b/substrate/client/executor/common/src/sandbox.rs index b7838aab7f3481b7bb4e15ac625bb1aac860cbc4..63f9cc4f258e8cea3f195d15321e2192406c62bc 100644 --- a/substrate/client/executor/common/src/sandbox.rs +++ b/substrate/client/executor/common/src/sandbox.rs @@ -21,15 +21,15 @@ //! Sandboxing is backed by wasmi at the moment. In future, however, we would like to add/switch to //! a compiled execution engine.
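Before the imports below, a conceptual sketch of the bookkeeping this module keeps between the two function index spaces (simplified stand-ins for the `SupervisorFuncIndex`/`GuestFuncIndex` newtypes and their mapping; not patch code):

    // Sketch only: simplified model of guest-to-supervisor function resolution.
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct SupervisorFuncIndex(usize); // index of a host function in the supervisor
    #[derive(Clone, Copy, PartialEq, Debug)]
    struct GuestFuncIndex(usize); // the index as seen from inside the sandbox

    #[derive(Default)]
    struct GuestToSupervisorMapping(Vec<SupervisorFuncIndex>);

    impl GuestToSupervisorMapping {
        // Registering a supervisor function yields the guest-visible index.
        fn define(&mut self, idx: SupervisorFuncIndex) -> GuestFuncIndex {
            self.0.push(idx);
            GuestFuncIndex(self.0.len() - 1)
        }
        // Invoking a guest import resolves back to the supervisor function.
        fn resolve(&self, idx: GuestFuncIndex) -> Option<SupervisorFuncIndex> {
            self.0.get(idx.0).copied()
        }
    }

    fn main() {
        let mut map = GuestToSupervisorMapping::default();
        let guest = map.define(SupervisorFuncIndex(7));
        assert_eq!(map.resolve(guest), Some(SupervisorFuncIndex(7)));
    }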
-use crate::error::{Result, Error}; -use std::{collections::HashMap, rc::Rc}; +use crate::error::{Error, Result}; use codec::{Decode, Encode}; use sp_core::sandbox as sandbox_primitives; +use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; +use std::{collections::HashMap, rc::Rc}; use wasmi::{ - Externals, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, - ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, memory_units::Pages, + memory_units::Pages, Externals, ImportResolver, MemoryInstance, MemoryRef, Module, + ModuleInstance, ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, }; -use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; /// Index of a function inside the supervisor. /// @@ -83,15 +83,9 @@ impl ImportResolver for Imports { field_name: &str, signature: &::wasmi::Signature, ) -> std::result::Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); + let key = (module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned()); let idx = *self.func_map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - )) + wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) })?; Ok(wasmi::FuncInstance::alloc_host(signature.clone(), idx.0)) } @@ -102,11 +96,9 @@ impl ImportResolver for Imports { field_name: &str, _memory_type: &::wasmi::MemoryDescriptor, ) -> std::result::Result { - let key = ( - module_name.as_bytes().to_vec(), - field_name.as_bytes().to_vec(), - ); - let mem = self.memories_map + let key = (module_name.as_bytes().to_vec(), field_name.as_bytes().to_vec()); + let mem = self + .memories_map .get(&key) .ok_or_else(|| { wasmi::Error::Instantiation(format!( @@ -124,10 +116,7 @@ impl ImportResolver for Imports { field_name: &str, _global_type: &::wasmi::GlobalDescriptor, ) -> std::result::Result { - Err(wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) + Err(wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name))) } fn resolve_table( @@ -136,10 +125,7 @@ impl ImportResolver for Imports { field_name: &str, _table_type: &::wasmi::TableDescriptor, ) -> std::result::Result { - Err(wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) + Err(wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name))) } } @@ -187,7 +173,9 @@ fn trap(msg: &'static str) -> Trap { TrapKind::Host(Box::new(Error::Other(msg.into()))).into() } -fn deserialize_result(mut serialized_result: &[u8]) -> std::result::Result, Trap> { +fn deserialize_result( + mut serialized_result: &[u8], +) -> std::result::Result, Trap> { use self::sandbox_primitives::HostError; use sp_wasm_interface::ReturnValue; let result_val = std::result::Result::::decode(&mut serialized_result) @@ -222,7 +210,8 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { ); // Serialize arguments into a byte vector. 
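For context on this step: the wasmi `RuntimeArgs` are converted to `sp_wasm_interface::Value` and SCALE-encoded before being copied into supervisor memory, and the host decodes the same buffer on the other side. A hedged sketch of that round trip, assuming `Value` implements the codec traits as its use in this hunk implies:

    // Sketch only: the encode/decode round trip implied by this call path.
    use codec::{Decode, Encode};
    use sp_wasm_interface::Value;

    fn roundtrip(args: Vec<Value>) -> Option<Vec<Value>> {
        let bytes = args.encode(); // what gets written into supervisor memory
        <Vec<Value>>::decode(&mut &bytes[..]).ok() // what the host reads back out
    }

    fn main() {
        let args = vec![Value::I32(0x1336), Value::I64(42)];
        assert_eq!(roundtrip(args.clone()), Some(args));
    }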
- let invoke_args_data: Vec<u8> = args + let invoke_args_data: Vec<u8> = args + .as_ref() .iter() .cloned() .map(sp_wasm_interface::Value::from) @@ -240,10 +229,7 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { .map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?; let deallocate = |this: &mut GuestExternals, ptr, fail_msg| { - this - .supervisor_externals - .deallocate_memory(ptr) - .map_err(|_| trap(fail_msg)) + this.supervisor_externals.deallocate_memory(ptr).map_err(|_| trap(fail_msg)) }; if self @@ -251,8 +237,12 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { .write_memory(invoke_args_ptr, &invoke_args_data) .is_err() { - deallocate(self, invoke_args_ptr, "Failed deallocation after failed write of invoke arguments")?; - return Err(trap("Can't write invoke args into memory")); + deallocate( + self, + invoke_args_ptr, + "Failed deallocation after failed write of invoke arguments", + )?; + return Err(trap("Can't write invoke args into memory")) } let result = self.supervisor_externals.invoke( @@ -263,7 +253,11 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { func_idx, ); - deallocate(self, invoke_args_ptr, "Can't deallocate memory for dispatch thunk's invoke arguments")?; + deallocate( + self, + invoke_args_ptr, + "Can't deallocate memory for dispatch thunk's invoke arguments", + )?; let result = result?; // dispatch_thunk returns pointer to serialized arguments. @@ -276,13 +270,18 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { (Pointer::new(ptr), len) }; - let serialized_result_val = self.supervisor_externals + let serialized_result_val = self + .supervisor_externals .read_memory(serialized_result_val_ptr, serialized_result_val_len) .map_err(|_| trap("Can't read the serialized result from dispatch thunk")); - deallocate(self, serialized_result_val_ptr, "Can't deallocate memory for dispatch thunk's result") - .and_then(|_| serialized_result_val) - .and_then(|serialized_result_val| deserialize_result(&serialized_result_val)) + deallocate( + self, + serialized_result_val_ptr, + "Can't deallocate memory for dispatch thunk's result", + ) + .and_then(|_| serialized_result_val) + .and_then(|serialized_result_val| deserialize_result(&serialized_result_val)) } } @@ -296,11 +295,7 @@ where FE: SandboxCapabilities, F: FnOnce(&mut GuestExternals<FE>) -> R, { - let mut guest_externals = GuestExternals { - supervisor_externals, - sandbox_instance, - state, - }; + let mut guest_externals = GuestExternals { supervisor_externals, sandbox_instance, state }; f(&mut guest_externals) } @@ -332,32 +327,23 @@ impl<FR> SandboxInstance<FR> { /// /// The `state` parameter can be used to provide custom data for /// these syscall implementations. - pub fn invoke>( + pub fn invoke>( &self, export_name: &str, args: &[RuntimeValue], supervisor_externals: &mut FE, state: u32, ) -> std::result::Result<Option<RuntimeValue>, wasmi::Error> { - with_guest_externals( - supervisor_externals, - self, - state, - |guest_externals| { - self.instance - .invoke_export(export_name, args, guest_externals) - }, - ) + with_guest_externals(supervisor_externals, self, state, |guest_externals| { + self.instance.invoke_export(export_name, args, guest_externals) + }) } /// Get the value from a global with the given `name`. /// /// Returns `Some(_)` if the global could be found. pub fn get_global_val(&self, name: &str) -> Option<sp_wasm_interface::Value> { - let global = self.instance - .export_by_name(name)? - .as_global()?
- .get(); + let global = self.instance.export_by_name(name)?.as_global()?.get(); Some(global.into()) } @@ -398,7 +384,7 @@ fn decode_environment_definition( let externals_idx = guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize)); func_map.insert((module, field), externals_idx); - } + }, sandbox_primitives::ExternEntity::Memory(memory_idx) => { let memory_ref = memories .get(memory_idx as usize) .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)? .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)?; memories_map.insert((module, field), memory_ref); - } + }, } } - Ok(( - Imports { - func_map, - memories_map, - }, - guest_to_supervisor_mapping, - )) + Ok((Imports { func_map, memories_map }, guest_to_supervisor_mapping)) } /// An environment in which the guest module is instantiated. @@ -435,10 +415,7 @@ impl GuestEnvironment { ) -> std::result::Result<Self, InstantiationError> { let (imports, guest_to_supervisor_mapping) = decode_environment_definition(raw_env_def, &store.memories)?; - Ok(Self { - imports, - guest_to_supervisor_mapping, - }) + Ok(Self { imports, guest_to_supervisor_mapping }) } } @@ -493,16 +470,11 @@ pub fn instantiate<'a, FE: SandboxCapabilities>( guest_to_supervisor_mapping: host_env.guest_to_supervisor_mapping, }); - with_guest_externals( - supervisor_externals, - &sandbox_instance, - state, - |guest_externals| { - instance - .run_start(guest_externals) - .map_err(|_| InstantiationError::StartTrapped) - }, - )?; + with_guest_externals(supervisor_externals, &sandbox_instance, state, |guest_externals| { + instance + .run_start(guest_externals) + .map_err(|_| InstantiationError::StartTrapped) + })?; Ok(UnregisteredInstance { sandbox_instance }) } @@ -519,10 +491,7 @@ pub struct Store { impl Store { /// Create a new empty sandbox store. pub fn new() -> Self { - Store { - instances: Vec::new(), - memories: Vec::new(), - } + Store { instances: Vec::new(), memories: Vec::new() } } /// Create a new memory instance and return its index. @@ -537,11 +506,7 @@ impl Store { specified_limit => Some(Pages(specified_limit as usize)), }; - let mem = - MemoryInstance::alloc( - Pages(initial as usize), - maximum, - )?; + let mem = MemoryInstance::alloc(Pages(initial as usize), maximum)?; let mem_idx = self.memories.len(); self.memories.push(Some(mem)); @@ -589,7 +554,7 @@ impl Store { Some(memory) => { *memory = None; Ok(()) - } + }, } } @@ -606,7 +571,7 @@ impl Store { Some(instance) => { *instance = None; Ok(()) - } + }, } } diff --git a/substrate/client/executor/runtime-test/src/lib.rs b/substrate/client/executor/runtime-test/src/lib.rs index c37766832b4611465d9d5ea113a1a5f9067dfb22..11771b183e3c4ceedda33b2b384c859e92eb3422 100644 --- a/substrate/client/executor/runtime-test/src/lib.rs +++ b/substrate/client/executor/runtime-test/src/lib.rs @@ -7,22 +7,28 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available.
Testing is only \ + supported with the flag disabled.", + ) } #[cfg(not(feature = "std"))] -use sp_std::{vec::Vec, vec}; +use sp_std::{vec, vec::Vec}; +#[cfg(not(feature = "std"))] +use sp_core::{ed25519, sr25519}; #[cfg(not(feature = "std"))] use sp_io::{ - storage, hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, - crypto::{ed25519_verify, sr25519_verify}, wasm_tracing, + crypto::{ed25519_verify, sr25519_verify}, + hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, + storage, wasm_tracing, }; #[cfg(not(feature = "std"))] -use sp_runtime::{print, traits::{BlakeTwo256, Hash}}; -#[cfg(not(feature = "std"))] -use sp_core::{ed25519, sr25519}; +use sp_runtime::{ + print, + traits::{BlakeTwo256, Hash}, +}; #[cfg(not(feature = "std"))] use sp_sandbox::Value; @@ -48,347 +54,347 @@ static mut MUTABLE_STATIC: u64 = 32; static mut MUTABLE_STATIC_BSS: u64 = 0; sp_core::wasm_export_functions! { - fn test_calling_missing_external() { - unsafe { missing_external() } - } - - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } - - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); - - print("storage"); - let foo = storage::get(b"foo").unwrap(); - - print("set_storage"); - storage::set(b"baz", &foo); - - print("finished!"); - b"all ok!".to_vec() - } - - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input, None); - b"all ok!".to_vec() - } - - fn test_empty_return() {} - - fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { - // This piece of code will dirty multiple pages of memory. The number of pages is given by - // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared - // is a wasm page that that follows the one that holds the `heap_base` address. - // - // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take - // 16 writes to process a single wasm page. - - let mut heap_ptr = heap_base as usize; - - // Find the next wasm page boundary. - let heap_ptr = round_up_to(heap_ptr, 65536); - - // Make it an actual pointer - let heap_ptr = heap_ptr as *mut u8; - - // Traverse the host pages and make each one dirty - let host_pages = heap_pages as usize * 16; - for i in 0..host_pages { - unsafe { - // technically this is an UB, but there is no way Rust can find this out. 
- heap_ptr.add(i * 4096).write(0); - } - } - - fn round_up_to(n: usize, divisor: usize) -> usize { - (n + divisor - 1) / divisor - } - } - - fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } - - fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { - let a = f32::from_le_bytes(a); - let b = f32::from_le_bytes(b); - f32::to_le_bytes(a + b) - } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec { - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - ).as_ref().to_vec() - } - - fn test_sandbox(code: Vec) -> bool { - execute_sandboxed(&code, &[]).is_ok() - } + fn test_calling_missing_external() { + unsafe { missing_external() } + } - fn test_sandbox_args(code: Vec) -> bool { - execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ], - ).is_ok() - } - - fn test_sandbox_return_val(code: Vec) -> bool { - let ok = match execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ) { - Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, - _ => false, - }; - - ok - } - - fn test_sandbox_instantiate(code: Vec) -> u8 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - Ok(_) => 0, - Err(sp_sandbox::Error::Module) => 1, - Err(sp_sandbox::Error::Execution) => 2, - Err(sp_sandbox::Error::OutOfBounds) => 3, - }; - - code - } - - fn test_sandbox_get_global_val(code: Vec) -> i64 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - i - } else { - return 20; - }; - - match instance.get_global_val("test_global") { - Some(sp_sandbox::Value::I64(val)) => val, - None => 30, - val => 40, - } - } - - fn test_offchain_index_set() { - sp_io::offchain_index::set(b"k", b"v"); - } - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - 
res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } + fn test_calling_yet_another_missing_external() { + unsafe { yet_another_missing_external() } + } - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; + fn test_data_in(input: Vec) -> Vec { + print("set_storage"); + storage::set(b"input", &input); - run().is_some() - } + print("storage"); + let foo = storage::get(b"foo").unwrap(); - fn test_enter_span() -> u64 { - wasm_tracing::enter_span(Default::default()) - } + print("set_storage"); + storage::set(b"baz", &foo); - fn test_exit_span(span_id: u64) { - wasm_tracing::exit(span_id) - } + print("finished!"); + b"all ok!".to_vec() + } - fn test_nested_spans() { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - { - sp_io::init_tracing(); - let span_id = wasm_tracing::enter_span(Default::default()); - wasm_tracing::exit(span_id); - } - wasm_tracing::exit(span_id); - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn returns_mutable_static_bss() -> u64 { - unsafe { - MUTABLE_STATIC_BSS += 1; - MUTABLE_STATIC_BSS - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. 
- // - // It is expected that the given pointer is not allocated. - fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = unsafe { (heap_base + offset) as *mut u8 }; - - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; - - assert_ne!(test_message, message_slice); - message_slice.copy_from_slice(test_message); - } - - fn test_spawn() { - let data = vec![1u8, 2u8]; - let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); - - assert_eq!(data_new, vec![2u8, 3u8]); - } - - fn test_nested_spawn() { - let data = vec![7u8, 13u8]; - let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); - - assert_eq!(data_new, vec![10u8, 16u8]); - } - - fn test_panic_in_spawned() { - sp_tasks::spawn(tasks::panicker, vec![]).join(); - } - } + fn test_clear_prefix(input: Vec) -> Vec { + storage::clear_prefix(&input, None); + b"all ok!".to_vec() + } + + fn test_empty_return() {} + + fn test_dirty_plenty_memory(heap_base: u32, heap_pages: u32) { + // This piece of code will dirty multiple pages of memory. The number of pages is given by + // the `heap_pages`. It's unit is a wasm page (64KiB). The first page to be cleared + // is a wasm page that that follows the one that holds the `heap_base` address. + // + // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take + // 16 writes to process a single wasm page. + + let mut heap_ptr = heap_base as usize; + + // Find the next wasm page boundary. + let heap_ptr = round_up_to(heap_ptr, 65536); + + // Make it an actual pointer + let heap_ptr = heap_ptr as *mut u8; + + // Traverse the host pages and make each one dirty + let host_pages = heap_pages as usize * 16; + for i in 0..host_pages { + unsafe { + // technically this is an UB, but there is no way Rust can find this out. 
+ heap_ptr.add(i * 4096).write(0); + } + } + + fn round_up_to(n: usize, divisor: usize) -> usize { + (n + divisor - 1) / divisor + } + } + + fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } + + fn test_fp_f32add(a: [u8; 4], b: [u8; 4]) -> [u8; 4] { + let a = f32::from_le_bytes(a); + let b = f32::from_le_bytes(b); + f32::to_le_bytes(a + b) + } + + fn test_panic() { panic!("test panic") } + + fn test_conditional_panic(input: Vec) -> Vec { + if input.len() > 0 { + panic!("test panic") + } + + input + } + + fn test_blake2_256(input: Vec) -> Vec { + blake2_256(&input).to_vec() + } + + fn test_blake2_128(input: Vec) -> Vec { + blake2_128(&input).to_vec() + } + + fn test_sha2_256(input: Vec) -> Vec { + sha2_256(&input).to_vec() + } + + fn test_twox_256(input: Vec) -> Vec { + twox_256(&input).to_vec() + } + + fn test_twox_128(input: Vec) -> Vec { + twox_128(&input).to_vec() + } + + fn test_ed25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + } + + fn test_sr25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + } + + fn test_ordered_trie_root() -> Vec { + BlakeTwo256::ordered_trie_root( + vec![ + b"zero"[..].into(), + b"one"[..].into(), + b"two"[..].into(), + ], + ).as_ref().to_vec() + } + + fn test_sandbox(code: Vec) -> bool { + execute_sandboxed(&code, &[]).is_ok() + } + + fn test_sandbox_args(code: Vec) -> bool { + execute_sandboxed( + &code, + &[ + Value::I32(0x12345678), + Value::I64(0x1234567887654321), + ], + ).is_ok() + } + + fn test_sandbox_return_val(code: Vec) -> bool { + let ok = match execute_sandboxed( + &code, + &[ + Value::I32(0x1336), + ] + ) { + Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, + _ => false, + }; + + ok + } + + fn test_sandbox_instantiate(code: Vec) -> u8 { + let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { + Ok(_) => 0, + Err(sp_sandbox::Error::Module) => 1, + Err(sp_sandbox::Error::Execution) => 2, + Err(sp_sandbox::Error::OutOfBounds) => 3, + }; + + code + } + + fn test_sandbox_get_global_val(code: Vec) -> i64 { + let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { + i + } else { + return 20; + }; + + match instance.get_global_val("test_global") { + Some(sp_sandbox::Value::I64(val)) => val, + None => 30, + val => 40, + } + } + + fn test_offchain_index_set() { + sp_io::offchain_index::set(b"k", b"v"); + } + + fn test_offchain_local_storage() -> bool { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + sp_io::offchain::local_storage_set(kind, b"test", b"asd"); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); + + let res = sp_io::offchain::local_storage_compare_and_set( + kind, + b"test", + Some(b"asd".to_vec()), + b"", + ); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); + res + } + + fn test_offchain_local_storage_with_none() { + let kind = 
sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + + let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); + assert_eq!(res, true); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); + } + + fn test_offchain_http() -> bool { + use sp_core::offchain::HttpRequestStatus; + let run = || -> Option<()> { + let id = sp_io::offchain::http_request_start( + "POST", + "http://localhost:12345", + &[], + ).ok()?; + sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; + sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; + sp_io::offchain::http_request_write_body(id, &[], None).ok()?; + let status = sp_io::offchain::http_response_wait(&[id], None); + assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); + let headers = sp_io::offchain::http_response_headers(id); + assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); + let mut buffer = vec![0; 64]; + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 3); + assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 0); + + Some(()) + }; + + run().is_some() + } + + fn test_enter_span() -> u64 { + wasm_tracing::enter_span(Default::default()) + } + + fn test_exit_span(span_id: u64) { + wasm_tracing::exit(span_id) + } + + fn test_nested_spans() { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + { + sp_io::init_tracing(); + let span_id = wasm_tracing::enter_span(Default::default()); + wasm_tracing::exit(span_id); + } + wasm_tracing::exit(span_id); + } + + fn returns_mutable_static() -> u64 { + unsafe { + MUTABLE_STATIC += 1; + MUTABLE_STATIC + } + } + + fn returns_mutable_static_bss() -> u64 { + unsafe { + MUTABLE_STATIC_BSS += 1; + MUTABLE_STATIC_BSS + } + } + + fn allocates_huge_stack_array(trap: bool) -> Vec { + // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). + // This will just decrease (stacks in wasm32-u-u grow downwards) the stack + // pointer. This won't trap on the current compilers. + let mut data = [0u8; 1024 * 768]; + + // Then make sure we actually write something to it. + // + // If: + // 1. the stack area is placed at the beginning of the linear memory space, and + // 2. the stack pointer points to out-of-bounds area, and + // 3. a write is performed around the current stack pointer. + // + // then a trap should happen. + // + for (i, v) in data.iter_mut().enumerate() { + *v = i as u8; // deliberate truncation + } + + if trap { + // There is a small chance of this to be pulled up in theory. In practice + // the probability of that is rather low. + panic!() + } + + data.to_vec() + } + + // Check that the heap at `heap_base + offset` don't contains the test message. + // After the check succeeds the test message is written into the heap. + // + // It is expected that the given pointer is not allocated. 
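The probe described in the comment above is easier to follow against a plain byte buffer standing in for wasm linear memory: assert that the marker is absent at `heap_base + offset`, then write it. A safe sketch of that logic (a `Vec<u8>` replaces the raw linear-memory pointer, and the buffer size and offsets are hypothetical):

// Safe model of `check_and_set_in_heap`: `memory` stands in for wasm linear
// memory, so slicing replaces the raw-pointer arithmetic of the real test.
fn check_and_set_in_heap(memory: &mut [u8], heap_base: usize, offset: usize) {
    let test_message = b"Hello invalid heap memory";
    let start = heap_base + offset;
    let slice = &mut memory[start..start + test_message.len()];

    // The marker must not be present yet...
    assert_ne!(&test_message[..], &*slice);
    // ...and after the check succeeds it is written in.
    slice.copy_from_slice(test_message);
}

fn main() {
    let mut memory = vec![0u8; 1024];
    check_and_set_in_heap(&mut memory, 512, 16);
    assert_eq!(&memory[528..553], &b"Hello invalid heap memory"[..]);
}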
+ fn check_and_set_in_heap(heap_base: u32, offset: u32) { + let test_message = b"Hello invalid heap memory"; + let ptr = unsafe { (heap_base + offset) as *mut u8 }; + + let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + + assert_ne!(test_message, message_slice); + message_slice.copy_from_slice(test_message); + } + + fn test_spawn() { + let data = vec![1u8, 2u8]; + let data_new = sp_tasks::spawn(tasks::incrementer, data).join(); + + assert_eq!(data_new, vec![2u8, 3u8]); + } + + fn test_nested_spawn() { + let data = vec![7u8, 13u8]; + let data_new = sp_tasks::spawn(tasks::parallel_incrementer, data).join(); + + assert_eq!(data_new, vec![10u8, 16u8]); + } + + fn test_panic_in_spawned() { + sp_tasks::spawn(tasks::panicker, vec![]).join(); + } +} - #[cfg(not(feature = "std"))] - mod tasks { +#[cfg(not(feature = "std"))] +mod tasks { use sp_std::prelude::*; pub fn incrementer(data: Vec) -> Vec { - data.into_iter().map(|v| v + 1).collect() + data.into_iter().map(|v| v + 1).collect() } pub fn panicker(_: Vec) -> Vec { @@ -396,11 +402,11 @@ sp_core::wasm_export_functions! { } pub fn parallel_incrementer(data: Vec) -> Vec { - let first = data.into_iter().map(|v| v + 2).collect::>(); - let second = sp_tasks::spawn(incrementer, first).join(); - second + let first = data.into_iter().map(|v| v + 2).collect::>(); + let second = sp_tasks::spawn(incrementer, first).join(); + second } - } +} #[cfg(not(feature = "std"))] fn execute_sandboxed( @@ -416,7 +422,7 @@ fn execute_sandboxed( args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError); + return Err(sp_sandbox::HostError) } let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; if condition != 0 { @@ -430,7 +436,7 @@ fn execute_sandboxed( args: &[Value], ) -> Result { if args.len() != 1 { - return Err(sp_sandbox::HostError); + return Err(sp_sandbox::HostError) } let inc_by = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; e.counter += inc_by as u32; @@ -445,7 +451,8 @@ fn execute_sandboxed( env_builder.add_host_func("env", "inc_counter", env_inc_counter); let memory = match sp_sandbox::Memory::new(1, Some(16)) { Ok(m) => m, - Err(_) => unreachable!(" + Err(_) => unreachable!( + " Memory::new() can return Err only if parameters are borked; \ We passing params here explicitly and they're correct; \ Memory::new() can't return a Error qed" diff --git a/substrate/client/executor/src/integration_tests/linux.rs b/substrate/client/executor/src/integration_tests/linux.rs index 057cc1332717b97b5f3e94f10b5033874a36b893..7e0696973dc779e2e0e59528e4d554fccc8cb982 100644 --- a/substrate/client/executor/src/integration_tests/linux.rs +++ b/substrate/client/executor/src/integration_tests/linux.rs @@ -23,8 +23,8 @@ // borthersome. 
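The host functions registered by `execute_sandboxed` above (`env_assert`, `env_inc_counter`) follow one shape: validate the argument count, decode the argument, touch the shared state, and return either a value or `HostError`. A self-contained sketch of that shape, with local stand-ins for the `sp_sandbox` types:

#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum Value {
    I32(i32),
    I64(i64),
}

#[derive(Debug)]
struct HostError;

struct State {
    counter: u32,
}

// Mirrors `env_inc_counter`: exactly one `I32` argument, added to the counter.
fn inc_counter(e: &mut State, args: &[Value]) -> Result<Value, HostError> {
    if args.len() != 1 {
        return Err(HostError)
    }
    let inc_by = match args[0] {
        Value::I32(v) => v,
        _ => return Err(HostError),
    };
    e.counter += inc_by as u32;
    Ok(Value::I32(e.counter as i32))
}

fn main() {
    let mut state = State { counter: 0 };
    assert_eq!(inc_counter(&mut state, &[Value::I32(5)]).unwrap(), Value::I32(5));
    assert!(inc_counter(&mut state, &[]).is_err());
}

A real host function would additionally wrap the result in `sp_sandbox::ReturnValue::Value`; the stand-ins keep only the control flow.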
#![cfg(feature = "wasmtime")] -use crate::WasmExecutionMethod; use super::mk_test_runtime; +use crate::WasmExecutionMethod; use codec::Encode as _; mod smaps; @@ -54,17 +54,11 @@ fn memory_consumption_compiled() { } instance - .call_export( - "test_dirty_plenty_memory", - &(heap_base as u32, 1u32).encode(), - ) + .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1u32).encode()) .unwrap(); let probe_1 = probe_rss(&*instance); instance - .call_export( - "test_dirty_plenty_memory", - &(heap_base as u32, 1024u32).encode(), - ) + .call_export("test_dirty_plenty_memory", &(heap_base as u32, 1024u32).encode()) .unwrap(); let probe_2 = probe_rss(&*instance); diff --git a/substrate/client/executor/src/integration_tests/linux/smaps.rs b/substrate/client/executor/src/integration_tests/linux/smaps.rs index 8088a5a3ea95233dc6c362e2847f0beb09e2a69c..b23a188b93a26a6fc22ef7861cba2f070d7b4448 100644 --- a/substrate/client/executor/src/integration_tests/linux/smaps.rs +++ b/substrate/client/executor/src/integration_tests/linux/smaps.rs @@ -19,8 +19,7 @@ //! A tool for extracting information about the memory consumption of the current process from //! the procfs. -use std::ops::Range; -use std::collections::BTreeMap; +use std::{collections::BTreeMap, ops::Range}; /// An interface to the /proc/self/smaps /// @@ -69,7 +68,8 @@ impl Smaps { } fn get_map(&self, addr: usize) -> &BTreeMap { - &self.0 + &self + .0 .iter() .find(|(range, _)| addr >= range.start && addr < range.end) .unwrap() diff --git a/substrate/client/executor/src/integration_tests/mod.rs b/substrate/client/executor/src/integration_tests/mod.rs index 0762306309df46acfd96db10b85abbce3390fdaa..dabead4799dc8222ed20a5c996a042ee3c47e913 100644 --- a/substrate/client/executor/src/integration_tests/mod.rs +++ b/substrate/client/executor/src/integration_tests/mod.rs @@ -20,20 +20,22 @@ mod linux; mod sandbox; -use std::sync::Arc; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use hex_literal::hex; +use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::WasmModule}; +use sc_runtime_test::wasm_binary_unwrap; use sp_core::{ - blake2_128, blake2_256, ed25519, sr25519, map, Pair, - offchain::{OffchainWorkerExt, OffchainDbExt, testing}, + blake2_128, blake2_256, ed25519, map, + offchain::{testing, OffchainDbExt, OffchainWorkerExt}, + sr25519, traits::Externalities, + Pair, }; -use sc_runtime_test::wasm_binary_unwrap; +use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; use sp_wasm_interface::HostFunctions as _; -use sp_runtime::traits::BlakeTwo256; -use sc_executor_common::{wasm_runtime::WasmModule, runtime_blob::RuntimeBlob}; +use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; use crate::WasmExecutionMethod; @@ -96,12 +98,7 @@ fn returning_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let output = call_in_wasm( - "test_empty_return", - &[], - wasm_method, - &mut ext, - ).unwrap(); + let output = call_in_wasm("test_empty_return", &[], wasm_method, &mut ext).unwrap(); assert_eq!(output, vec![0u8; 0]); } @@ -164,28 +161,13 @@ fn panicking_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let output = call_in_wasm( - "test_panic", - &[], - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_panic", &[], 
wasm_method, &mut ext); assert!(output.is_err()); - let output = call_in_wasm( - "test_conditional_panic", - &[0], - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_conditional_panic", &[0], wasm_method, &mut ext); assert_eq!(Decode::decode(&mut &output.unwrap()[..]), Ok(Vec::::new())); - let output = call_in_wasm( - "test_conditional_panic", - &vec![2].encode(), - wasm_method, - &mut ext, - ); + let output = call_in_wasm("test_conditional_panic", &vec![2].encode(), wasm_method, &mut ext); assert!(output.is_err()); } @@ -197,12 +179,9 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let output = + call_in_wasm("test_data_in", &b"Hello world".to_vec().encode(), wasm_method, &mut ext) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } @@ -230,12 +209,9 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); // This will clear all entries which prefix is "ab". - let output = call_in_wasm( - "test_clear_prefix", - &b"ab".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let output = + call_in_wasm("test_clear_prefix", &b"ab".to_vec().encode(), wasm_method, &mut ext) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } @@ -256,21 +232,12 @@ fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_blake2_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_256", &[0], wasm_method, &mut ext,).unwrap(), blake2_256(&b""[..]).to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_blake2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), blake2_256(&b"Hello world!"[..]).to_vec().encode(), ); } @@ -280,21 +247,12 @@ fn blake2_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_blake2_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_128", &[0], wasm_method, &mut ext,).unwrap(), blake2_128(&b""[..]).to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_blake2_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_blake2_128", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), blake2_128(&b"Hello world!"[..]).to_vec().encode(), ); } @@ -304,25 +262,14 @@ fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_sha2_256", - &[0], - wasm_method, - &mut ext, - ) - .unwrap(), + call_in_wasm("test_sha2_256", &[0], wasm_method, &mut ext,).unwrap(), hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") .to_vec() .encode(), ); assert_eq!( - call_in_wasm( - "test_sha2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ) - .unwrap(), + call_in_wasm("test_sha2_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") .to_vec() .encode(), @@ -334,26 +281,17 @@ fn 
twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_twox_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - hex!( - "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" - ).to_vec().encode(), + call_in_wasm("test_twox_256", &[0], wasm_method, &mut ext,).unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + .to_vec() + .encode(), ); assert_eq!( - call_in_wasm( - "test_twox_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - hex!( - "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" - ).to_vec().encode(), + call_in_wasm("test_twox_256", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + .to_vec() + .encode(), ); } @@ -362,21 +300,12 @@ fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); assert_eq!( - call_in_wasm( - "test_twox_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_twox_128", &[0], wasm_method, &mut ext,).unwrap(), hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), ); assert_eq!( - call_in_wasm( - "test_twox_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_twox_128", &b"Hello world!".to_vec().encode(), wasm_method, &mut ext,) + .unwrap(), hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), ); } @@ -392,12 +321,7 @@ fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(sig.as_ref()); assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_ed25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), true.encode(), ); @@ -407,12 +331,7 @@ fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(other_sig.as_ref()); assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_ed25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), false.encode(), ); } @@ -428,12 +347,7 @@ fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(sig.as_ref()); assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sr25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), true.encode(), ); @@ -443,12 +357,7 @@ fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { calldata.extend_from_slice(other_sig.as_ref()); assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sr25519_verify", &calldata.encode(), wasm_method, &mut ext,).unwrap(), false.encode(), ); } @@ -458,12 +367,7 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; assert_eq!( - call_in_wasm( - "test_ordered_trie_root", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), ); } @@ -473,17 +377,14 @@ fn 
offchain_index(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let (offchain, _state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainWorkerExt::new(offchain)); - call_in_wasm( - "test_offchain_index_set", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(); + call_in_wasm("test_offchain_index_set", &[0], wasm_method, &mut ext.ext()).unwrap(); use sp_core::offchain::OffchainOverlayedChange; - let data = ext.overlayed_changes().clone().offchain_drain_committed().find(|(k, _v)| { - k == &(sp_core::offchain::STORAGE_PREFIX.to_vec(), b"k".to_vec()) - }); + let data = ext + .overlayed_changes() + .clone() + .offchain_drain_committed() + .find(|(k, _v)| k == &(sp_core::offchain::STORAGE_PREFIX.to_vec(), b"k".to_vec())); assert_eq!(data.map(|data| data.1), Some(OffchainOverlayedChange::SetValue(b"v".to_vec()))); } @@ -494,12 +395,7 @@ fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { ext.register_extension(OffchainDbExt::new(offchain.clone())); ext.register_extension(OffchainWorkerExt::new(offchain)); assert_eq!( - call_in_wasm( - "test_offchain_local_storage", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_offchain_local_storage", &[0], wasm_method, &mut ext.ext(),).unwrap(), true.encode(), ); assert_eq!(state.read().persistent_storage.get(b"test"), Some(vec![])); @@ -511,24 +407,18 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { let (offchain, state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainWorkerExt::new(offchain)); state.write().expect_request(testing::PendingRequest { - method: "POST".into(), - uri: "http://localhost:12345".into(), - body: vec![1, 2, 3, 4], - headers: vec![("X-Auth".to_owned(), "test".to_owned())], - sent: true, - response: Some(vec![1, 2, 3]), - response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], - ..Default::default() - }, - ); + method: "POST".into(), + uri: "http://localhost:12345".into(), + body: vec![1, 2, 3, 4], + headers: vec![("X-Auth".to_owned(), "test".to_owned())], + sent: true, + response: Some(vec![1, 2, 3]), + response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], + ..Default::default() + }); assert_eq!( - call_in_wasm( - "test_offchain_http", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), + call_in_wasm("test_offchain_http", &[0], wasm_method, &mut ext.ext(),).unwrap(), true.encode(), ); } @@ -539,7 +429,7 @@ fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let executor = crate::WasmExecutor::new( wasm_method, - Some(17), // `17` is the initial number of pages compiled into the binary. + Some(17), // `17` is the initial number of pages compiled into the binary. HostFunctions::host_functions(), 8, None, @@ -593,17 +483,13 @@ fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); let instance = runtime.new_instance().unwrap(); - let res = instance - .call_export("returns_mutable_static_bss", &[0]) - .unwrap(); + let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); // We expect that every invocation will need to return the initial // value plus one. If the value increases more than that then it is // a sign that the wasm runtime preserves the memory content. 
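The comment above states the invariant that `returns_mutable_static_bss` probes: each call must observe a freshly initialized instance, so the counter always comes back as the initial value plus one. A stand-in model of that expectation:

// Models "fresh instance per call": if the runtime reinitializes memory
// between calls, incrementing a zeroed static always yields 1.
struct Instance {
    mutable_static_bss: u64,
}

impl Instance {
    fn new() -> Self {
        Instance { mutable_static_bss: 0 }
    }

    fn returns_mutable_static_bss(&mut self) -> u64 {
        self.mutable_static_bss += 1;
        self.mutable_static_bss
    }
}

fn main() {
    // A runtime that resets state behaves like constructing a new instance:
    assert_eq!(Instance::new().returns_mutable_static_bss(), 1);
    assert_eq!(Instance::new().returns_mutable_static_bss(), 1);
    // A runtime that leaked state across calls would return 2 the second time.
}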
- let res = instance - .call_export("returns_mutable_static_bss", &[0]) - .unwrap(); + let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); } @@ -638,7 +524,8 @@ fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); let instance = runtime.new_instance().unwrap(); - let heap_base = instance.get_global_const("__heap_base") + let heap_base = instance + .get_global_const("__heap_base") .expect("`__heap_base` is valid") .expect("`__heap_base` exists") .as_i32() @@ -689,8 +576,8 @@ fn parallel_execution(wasm_method: WasmExecutionMethod) { test_wasm_execution!(wasm_tracing_should_work); fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { - use std::sync::Mutex; use sc_tracing::{SpanDatum, TraceEvent}; + use std::sync::Mutex; struct TestTraceHandler(Arc>>); @@ -706,36 +593,23 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { let handler = TestTraceHandler(traces.clone()); // Create subscriber with wasm_tracing disabled - let test_subscriber = tracing_subscriber::fmt().finish().with( - sc_tracing::ProfilingLayer::new_with_handler( - Box::new(handler), "default" - ) - ); + let test_subscriber = tracing_subscriber::fmt() + .finish() + .with(sc_tracing::ProfilingLayer::new_with_handler(Box::new(handler), "default")); let _guard = tracing::subscriber::set_default(test_subscriber); let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let span_id = call_in_wasm( - "test_enter_span", - Default::default(), - wasm_method, - &mut ext, - ).unwrap(); + let span_id = + call_in_wasm("test_enter_span", Default::default(), wasm_method, &mut ext).unwrap(); let span_id = u64::decode(&mut &span_id[..]).unwrap(); - assert!( - span_id > 0 - ); + assert!(span_id > 0); - call_in_wasm( - "test_exit_span", - &span_id.encode(), - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_exit_span", &span_id.encode(), wasm_method, &mut ext).unwrap(); // Check there is only the single trace let len = traces.lock().unwrap().len(); @@ -747,12 +621,7 @@ fn wasm_tracing_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(span_datum.name, ""); assert_eq!(values.bool_values.get("wasm").unwrap(), &true); - call_in_wasm( - "test_nested_spans", - Default::default(), - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_nested_spans", Default::default(), wasm_method, &mut ext).unwrap(); let len = traces.lock().unwrap().len(); assert_eq!(len, 2); } @@ -762,12 +631,7 @@ fn spawning_runtime_instance_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - call_in_wasm( - "test_spawn", - &[], - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_spawn", &[], wasm_method, &mut ext).unwrap(); } test_wasm_execution!(spawning_runtime_instance_nested_should_work); @@ -775,12 +639,7 @@ fn spawning_runtime_instance_nested_should_work(wasm_method: WasmExecutionMethod let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - call_in_wasm( - "test_nested_spawn", - &[], - wasm_method, - &mut ext, - ).unwrap(); + call_in_wasm("test_nested_spawn", &[], wasm_method, &mut ext).unwrap(); } test_wasm_execution!(panic_in_spawned_instance_panics_on_joining_its_result); @@ -788,12 +647,8 @@ fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecu let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let error_result = call_in_wasm( - 
"test_panic_in_spawned", - &[], - wasm_method, - &mut ext, - ).unwrap_err(); + let error_result = + call_in_wasm("test_panic_in_spawned", &[], wasm_method, &mut ext).unwrap_err(); assert!(format!("{}", error_result).contains("Spawned task")); } diff --git a/substrate/client/executor/src/integration_tests/sandbox.rs b/substrate/client/executor/src/integration_tests/sandbox.rs index 7ce9c94a2db8ae667a9446b069220355db4cb246..ee3b295ae8a85b8bcd4c0f889eac6db3eb065e21 100644 --- a/substrate/client/executor/src/integration_tests/sandbox.rs +++ b/substrate/client/executor/src/integration_tests/sandbox.rs @@ -16,9 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use super::{TestExternalities, call_in_wasm}; -use crate::WasmExecutionMethod; -use crate::test_wasm_execution; +use super::{call_in_wasm, TestExternalities}; +use crate::{test_wasm_execution, WasmExecutionMethod}; use codec::Encode; @@ -27,7 +26,8 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -46,17 +46,12 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), true.encode(),); } test_wasm_execution!(sandbox_trap); @@ -64,7 +59,8 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (func (export "call") @@ -72,17 +68,11 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap(); + "#, + ) + .unwrap(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - vec![0], - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), vec![0],); } test_wasm_execution!(start_called); @@ -90,7 +80,8 @@ fn start_called(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -115,17 +106,12 @@ fn start_called(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + assert_eq!(call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), true.encode(),); } test_wasm_execution!(invoke_args); @@ -133,7 +119,8 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -154,15 +141,13 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - 
call_in_wasm( - "test_sandbox_args", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_args", &code, wasm_method, &mut ext,).unwrap(), true.encode(), ); } @@ -172,7 +157,8 @@ fn return_val(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -181,15 +167,13 @@ fn return_val(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_return_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_return_val", &code, wasm_method, &mut ext,).unwrap(), true.encode(), ); } @@ -199,22 +183,21 @@ fn unlinkable_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (import "env" "non-existent" (func)) (func (export "call") ) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 1u8.encode(), ); } @@ -228,12 +211,7 @@ fn corrupted_module(wasm_method: WasmExecutionMethod) { let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 1u8.encode(), ); } @@ -243,7 +221,8 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") ) @@ -253,15 +232,13 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 0u8.encode(), ); } @@ -271,7 +248,8 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (func (export "call") ) @@ -282,15 +260,13 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), 2u8.encode(), ); } @@ -300,19 +276,18 @@ fn get_global_val_works(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let code = wat::parse_str(r#" + let code = wat::parse_str( + r#" (module (global (export "test_global") i64 (i64.const 500)) ) - "#).unwrap().encode(); + "#, + ) + .unwrap() + .encode(); assert_eq!( - call_in_wasm( - "test_sandbox_get_global_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), + call_in_wasm("test_sandbox_get_global_val", &code, wasm_method, &mut ext,).unwrap(), 500i64.encode(), ); } diff --git a/substrate/client/executor/src/lib.rs 
b/substrate/client/executor/src/lib.rs index c0cbf9c94dafdaf470b23206bffc1aecf08b0b63..f4b972a86f27ab154243d2add55cb014bf3f0edd 100644 --- a/substrate/client/executor/src/lib.rs +++ b/substrate/client/executor/src/lib.rs @@ -29,26 +29,25 @@ //! wasm engine used, instance cache. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] #[macro_use] mod native_executor; -mod wasm_runtime; #[cfg(test)] mod integration_tests; +mod wasm_runtime; -pub use wasmi; +pub use codec::Codec; pub use native_executor::{ - with_externalities_safe, NativeExecutor, WasmExecutor, NativeExecutionDispatch, + with_externalities_safe, NativeExecutionDispatch, NativeExecutor, WasmExecutor, }; -pub use sp_version::{RuntimeVersion, NativeVersion}; -pub use codec::Codec; #[doc(hidden)] -pub use sp_core::traits::{Externalities}; +pub use sp_core::traits::Externalities; +pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; -pub use wasm_runtime::WasmExecutionMethod; -pub use wasm_runtime::read_embedded_version; +pub use wasm_runtime::{read_embedded_version, WasmExecutionMethod}; +pub use wasmi; pub use sc_executor_common::{error, sandbox}; @@ -68,10 +67,10 @@ pub trait RuntimeInfo { #[cfg(test)] mod tests { use super::*; + use sc_executor_common::runtime_blob::RuntimeBlob; use sc_runtime_test::wasm_binary_unwrap; use sp_io::TestExternalities; use sp_wasm_interface::HostFunctions; - use sc_executor_common::runtime_blob::RuntimeBlob; #[test] fn call_in_interpreted_wasm_works() { diff --git a/substrate/client/executor/src/native_executor.rs b/substrate/client/executor/src/native_executor.rs index 6fc34b6f1a3225d17b80af73732be8b475995522..e54803d2d074fb7839681f614bf5073d487de402 100644 --- a/substrate/client/executor/src/native_executor.rs +++ b/substrate/client/executor/src/native_executor.rs @@ -17,32 +17,36 @@ // along with this program. If not, see . use crate::{ - RuntimeInfo, error::{Error, Result}, + error::{Error, Result}, wasm_runtime::{RuntimeCache, WasmExecutionMethod}, + RuntimeInfo, }; use std::{ collections::HashMap, - panic::{UnwindSafe, AssertUnwindSafe}, - result, - sync::{Arc, atomic::{AtomicU64, Ordering}, mpsc}, + panic::{AssertUnwindSafe, UnwindSafe}, path::PathBuf, + result, + sync::{ + atomic::{AtomicU64, Ordering}, + mpsc, Arc, + }, }; -use sp_version::{NativeVersion, RuntimeVersion}; use codec::{Decode, Encode}; -use sp_core::{ - NativeOrEncoded, - traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawnExt, RuntimeSpawn}, -}; use log::trace; -use sp_wasm_interface::{HostFunctions, Function}; use sc_executor_common::{ - wasm_runtime::{WasmInstance, WasmModule, InvokeMethod}, runtime_blob::RuntimeBlob, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, +}; +use sp_core::{ + traits::{CodeExecutor, Externalities, RuntimeCode, RuntimeSpawn, RuntimeSpawnExt}, + NativeOrEncoded, }; use sp_externalities::ExternalitiesExt as _; use sp_tasks::new_async_externalities; +use sp_version::{NativeVersion, RuntimeVersion}; +use sp_wasm_interface::{Function, HostFunctions}; /// Default num of pages for the heap const DEFAULT_HEAP_PAGES: u64 = 2048; @@ -51,25 +55,23 @@ const DEFAULT_HEAP_PAGES: u64 = 2048; /// /// If the inner closure panics, it will be caught and return an error. 
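The panic capture in `with_externalities_safe` below reduces to a standard pattern: run the closure under `std::panic::catch_unwind` and downcast the payload (a `String` or a `&'static str`) into an error message. A freestanding sketch of just that capture logic:

use std::panic::{catch_unwind, UnwindSafe};

// Runs `f`, converting a panic payload into an error string, mirroring the
// downcast chain in `with_externalities_safe`. The real function also swaps
// in a non-aborting panic handler first (`AbortGuard::force_unwind`).
fn run_safe<U>(f: impl FnOnce() -> U + UnwindSafe) -> Result<U, String> {
    catch_unwind(f).map_err(|e| {
        if let Some(err) = e.downcast_ref::<String>() {
            err.clone()
        } else if let Some(err) = e.downcast_ref::<&'static str>() {
            err.to_string()
        } else {
            "Unknown panic".into()
        }
    })
}

fn main() {
    assert_eq!(run_safe(|| 42).unwrap(), 42);
    assert_eq!(run_safe(|| { panic!("boom"); }).unwrap_err(), "boom");
}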
pub fn with_externalities_safe(ext: &mut dyn Externalities, f: F) -> Result - where F: UnwindSafe + FnOnce() -> U +where + F: UnwindSafe + FnOnce() -> U, { - sp_externalities::set_and_run_with_externalities( - ext, - move || { - // Substrate uses custom panic hook that terminates process on panic. Disable - // termination for the native call. - let _guard = sp_panic_handler::AbortGuard::force_unwind(); - std::panic::catch_unwind(f).map_err(|e| { - if let Some(err) = e.downcast_ref::() { - Error::RuntimePanicked(err.clone()) - } else if let Some(err) = e.downcast_ref::<&'static str>() { - Error::RuntimePanicked(err.to_string()) - } else { - Error::RuntimePanicked("Unknown panic".into()) - } - }) - }, - ) + sp_externalities::set_and_run_with_externalities(ext, move || { + // Substrate uses custom panic hook that terminates process on panic. Disable + // termination for the native call. + let _guard = sp_panic_handler::AbortGuard::force_unwind(); + std::panic::catch_unwind(f).map_err(|e| { + if let Some(err) = e.downcast_ref::() { + Error::RuntimePanicked(err.clone()) + } else if let Some(err) = e.downcast_ref::<&'static str>() { + Error::RuntimePanicked(err.to_string()) + } else { + Error::RuntimePanicked("Unknown panic".into()) + } + }) + }) } /// Delegate for dispatching a CodeExecutor call. @@ -163,7 +165,8 @@ impl WasmExecutor { allow_missing_host_functions: bool, f: F, ) -> Result - where F: FnOnce( + where + F: FnOnce( AssertUnwindSafe<&Arc>, AssertUnwindSafe<&dyn WasmInstance>, Option<&RuntimeVersion>, @@ -182,7 +185,7 @@ impl WasmExecutor { let instance = AssertUnwindSafe(instance); let ext = AssertUnwindSafe(ext); f(module, instance, version, ext) - } + }, )? { Ok(r) => r, Err(e) => Err(e), @@ -245,7 +248,7 @@ impl sp_core::traits::ReadRuntimeVersion for WasmExecutor { .map_err(|e| format!("Failed to read the static section: {:?}", e)) .map(|v| v.map(|v| v.encode()))? { - return Ok(version); + return Ok(version) } // If the blob didn't have embedded runtime version section, we fallback to the legacy @@ -296,13 +299,13 @@ impl NativeExecutor { .into_iter() // filter out any host function overrides provided. .filter(|host_fn| { - extended.iter() + extended + .iter() .find(|ext_host_fn| host_fn.name() == ext_host_fn.name()) .is_none() }) .collect::>(); - // Add the custom host functions provided by the user. 
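The filter-then-extend sequence above encodes the override rule for host functions: a default is dropped whenever a user-supplied function with the same name exists, and the user's functions are appended afterwards. The same merge over plain name strings (hypothetical names; the real code compares `Function::name()` on trait objects):

// Name-based merge mirroring the override rule: defaults that collide with
// a user-provided name are dropped, then the user's functions are appended.
fn merge_host_functions(
    defaults: Vec<&'static str>,
    extended: Vec<&'static str>,
) -> Vec<&'static str> {
    let mut merged: Vec<&'static str> = defaults
        .into_iter()
        .filter(|name| !extended.iter().any(|ext| ext == name))
        .collect();
    merged.extend(extended);
    merged
}

fn main() {
    let merged = merge_host_functions(vec!["ext_print", "ext_hashing"], vec!["ext_hashing"]);
    assert_eq!(merged, vec!["ext_print", "ext_hashing"]);
}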
host_functions.extend(extended); let wasm_executor = WasmExecutor::new( @@ -331,13 +334,10 @@ impl RuntimeInfo for NativeExecutor { ext: &mut dyn Externalities, runtime_code: &RuntimeCode, ) -> Result { - self.wasm.with_instance( - runtime_code, - ext, - false, - |_module, _instance, version, _ext| - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))), - ) + self.wasm + .with_instance(runtime_code, ext, false, |_module, _instance, version, _ext| { + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) } } @@ -358,70 +358,67 @@ impl RuntimeSpawn for RuntimeInstanceSpawn { let module = self.module.clone(); let scheduler = self.scheduler.clone(); - self.scheduler.spawn("executor-extra-runtime-instance", Box::pin(async move { - let module = AssertUnwindSafe(module); - - let async_ext = match new_async_externalities(scheduler.clone()) { - Ok(val) => val, - Err(e) => { - log::error!( - target: "executor", - "Failed to setup externalities for async context: {}", - e, - ); - - // This will drop sender and receiver end will panic - return; - } - }; - - let mut async_ext = match async_ext.with_runtime_spawn( - Box::new(RuntimeInstanceSpawn::new(module.clone(), scheduler)) - ) { - Ok(val) => val, - Err(e) => { - log::error!( - target: "executor", - "Failed to setup runtime extension for async externalities: {}", - e, - ); - - // This will drop sender and receiver end will panic - return; - } - }; + self.scheduler.spawn( + "executor-extra-runtime-instance", + Box::pin(async move { + let module = AssertUnwindSafe(module); + + let async_ext = match new_async_externalities(scheduler.clone()) { + Ok(val) => val, + Err(e) => { + log::error!( + target: "executor", + "Failed to setup externalities for async context: {}", + e, + ); + + // This will drop sender and receiver end will panic + return + }, + }; + + let mut async_ext = match async_ext.with_runtime_spawn(Box::new( + RuntimeInstanceSpawn::new(module.clone(), scheduler), + )) { + Ok(val) => val, + Err(e) => { + log::error!( + target: "executor", + "Failed to setup runtime extension for async externalities: {}", + e, + ); - let result = with_externalities_safe( - &mut async_ext, - move || { + // This will drop sender and receiver end will panic + return + }, + }; + let result = with_externalities_safe(&mut async_ext, move || { // FIXME: Should be refactored to shared "instance factory". // Instantiating wasm here every time is suboptimal at the moment, shared // pool of instances should be used. // // https://github.com/paritytech/substrate/issues/7354 - let instance = module.new_instance() - .expect("Failed to create new instance from module"); + let instance = + module.new_instance().expect("Failed to create new instance from module"); - instance.call( - InvokeMethod::TableWithWrapper { dispatcher_ref, func }, - &data[..], - ).expect("Failed to invoke instance.") - } - ); - - match result { - Ok(output) => { - let _ = sender.send(output); - }, - Err(error) => { - // If execution is panicked, the `join` in the original runtime code will panic as well, - // since the sender is dropped without sending anything. 
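The comment above describes a deliberate failure mode: the spawned task's result travels over a channel, and when the task panics before sending, the sender is dropped and the receiving side observes a disconnect, which the runtime's `join` surfaces as a panic. The mechanism in miniature, with std's mpsc channel as a stand-in:

use std::sync::mpsc;
use std::thread;

fn main() {
    let (sender, receiver) = mpsc::channel::<Vec<u8>>();
    thread::spawn(move || {
        // Simulates the task dying before producing output: the sender is
        // dropped without anything being sent.
        drop(sender);
    });
    // A `join` implemented as `receiver.recv().expect(..)` would panic here;
    // `recv` reports the disconnect as an error.
    assert!(receiver.recv().is_err());
}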
- log::error!("Call error in spawned task: {:?}", error); - }, - } - })); + instance + .call(InvokeMethod::TableWithWrapper { dispatcher_ref, func }, &data[..]) + .expect("Failed to invoke instance.") + }); + match result { + Ok(output) => { + let _ = sender.send(output); + }, + Err(error) => { + // If execution is panicked, the `join` in the original runtime code will panic as well, + // since the sender is dropped without sending anything. + log::error!("Call error in spawned task: {:?}", error); + }, + } + }), + ); new_handle } @@ -438,12 +435,7 @@ impl RuntimeInstanceSpawn { module: Arc, scheduler: Box, ) -> Self { - Self { - module, - scheduler, - counter: 0.into(), - tasks: HashMap::new().into(), - } + Self { module, scheduler, counter: 0.into(), tasks: HashMap::new().into() } } fn with_externalities_and_module( @@ -495,17 +487,13 @@ impl CodeExecutor for NativeExecutor { ext, false, |module, instance, onchain_version, mut ext| { - let onchain_version = onchain_version.ok_or_else( - || Error::ApiError("Unknown version".into()) - )?; + let onchain_version = + onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; - let can_call_with = onchain_version.can_call_with(&self.native_version.runtime_version); + let can_call_with = + onchain_version.can_call_with(&self.native_version.runtime_version); - match ( - use_native, - can_call_with, - native_call, - ) { + match (use_native, can_call_with, native_call) { (_, false, _) | (false, _, _) => { if !can_call_with { trace!( @@ -516,13 +504,10 @@ impl CodeExecutor for NativeExecutor { ); } - with_externalities_safe( - &mut **ext, - move || { - preregister_builtin_ext(module.clone()); - instance.call_export(method, data).map(NativeOrEncoded::Encoded) - } - ) + with_externalities_safe(&mut **ext, move || { + preregister_builtin_ext(module.clone()); + instance.call_export(method, data).map(NativeOrEncoded::Encoded) + }) }, (true, true, Some(call)) => { trace!( @@ -535,13 +520,10 @@ impl CodeExecutor for NativeExecutor { used_native = true; let res = with_externalities_safe(&mut **ext, move || (call)()) - .and_then(|r| r - .map(NativeOrEncoded::Native) - .map_err(Error::ApiError) - ); + .and_then(|r| r.map(NativeOrEncoded::Native).map_err(Error::ApiError)); Ok(res) - } + }, _ => { trace!( target: "executor", @@ -552,9 +534,9 @@ impl CodeExecutor for NativeExecutor { used_native = true; Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) - } + }, } - } + }, ); (result, used_native) } @@ -617,7 +599,6 @@ impl sp_core::traits::ReadRuntimeVersion for NativeE /// /// When you have multiple interfaces, you can give the host functions as a tuple e.g.: /// `(my_interface::HostFunctions, my_interface2::HostFunctions)` -/// #[macro_export] macro_rules! native_executor_instance { ( $pub:vis $name:ident, $dispatcher:path, $version:path $(,)?) 
=> { @@ -675,16 +656,9 @@ mod tests { #[test] fn native_executor_registers_custom_interface() { - let executor = NativeExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - ); + let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8); my_interface::HostFunctions::host_functions().iter().for_each(|function| { - assert_eq!( - executor.wasm.host_functions.iter().filter(|f| f == &function).count(), - 2, - ); + assert_eq!(executor.wasm.host_functions.iter().filter(|f| f == &function).count(), 2,); }); my_interface::say_hello_world("hey"); diff --git a/substrate/client/executor/src/wasm_runtime.rs b/substrate/client/executor/src/wasm_runtime.rs index 4e6febbf15b677126df6c4ac5a4830f95e1e9468..8674e7239255ba919c681f754518ad5b675ac5ad 100644 --- a/substrate/client/executor/src/wasm_runtime.rs +++ b/substrate/client/executor/src/wasm_runtime.rs @@ -21,17 +21,19 @@ //! The primary means of accessing the runtimes is through a cache which saves the reusable //! components of the runtime that are expensive to initialize. -use std::sync::Arc; use crate::error::{Error, WasmError}; -use parking_lot::Mutex; use codec::Decode; -use sp_core::traits::{Externalities, RuntimeCode, FetchRuntimeCode}; -use sp_version::RuntimeVersion; -use std::panic::AssertUnwindSafe; -use std::path::{Path, PathBuf}; +use parking_lot::Mutex; use sc_executor_common::{ - wasm_runtime::{WasmModule, WasmInstance}, runtime_blob::RuntimeBlob, + wasm_runtime::{WasmInstance, WasmModule}, +}; +use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; +use sp_version::RuntimeVersion; +use std::{ + panic::AssertUnwindSafe, + path::{Path, PathBuf}, + sync::Arc, }; use sp_wasm_interface::Function; @@ -70,27 +72,26 @@ struct VersionedRuntime { impl VersionedRuntime { /// Run the given closure `f` with an instance of this runtime. - fn with_instance<'c, R, F>( - &self, - ext: &mut dyn Externalities, - f: F, - ) -> Result - where F: FnOnce( + fn with_instance<'c, R, F>(&self, ext: &mut dyn Externalities, f: F) -> Result + where + F: FnOnce( &Arc, &dyn WasmInstance, Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, + &mut dyn Externalities, + ) -> Result, { // Find a free instance - let instance = self.instances + let instance = self + .instances .iter() .enumerate() .find_map(|(index, i)| i.try_lock().map(|i| (index, i))); match instance { Some((index, mut locked)) => { - let (instance, new_inst) = locked.take() + let (instance, new_inst) = locked + .take() .map(|r| Ok((r, false))) .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; @@ -131,7 +132,7 @@ impl VersionedRuntime { let instance = self.module.new_instance()?; f(&self.module, &*instance, self.version.as_ref(), ext) - } + }, } } } @@ -168,11 +169,7 @@ impl RuntimeCache { /// `cache_path` allows to specify an optional directory where the executor can store files /// for caching. pub fn new(max_runtime_instances: usize, cache_path: Option) -> RuntimeCache { - RuntimeCache { - runtimes: Default::default(), - max_runtime_instances, - cache_path, - } + RuntimeCache { runtimes: Default::default(), max_runtime_instances, cache_path } } /// Prepares a WASM module instance and executes given function for it. 
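Worth pausing on what `VersionedRuntime::with_instance` above does beneath the reformatting: the runtime keeps a fixed array of `Mutex<Option<Instance>>` slots, claims the first slot whose lock `try_lock` wins without blocking, lazily instantiates it on first use, and falls back to a throw-away instance when every slot is busy. A minimal, self-contained sketch of that pooling pattern (the `Pool` type and `make` parameter are illustrative, not the crate's API; the real code additionally takes the instance out of its slot, runs the closure, and only puts the instance back if nothing went wrong):

```rust
use std::sync::Mutex;

/// A fixed-size pool: each slot holds an optionally-initialized instance.
struct Pool<T> {
    slots: Vec<Mutex<Option<T>>>,
}

impl<T> Pool<T> {
    fn new(capacity: usize) -> Self {
        Pool { slots: (0..capacity).map(|_| Mutex::new(None)).collect() }
    }

    /// Run `f` with a pooled instance, creating one via `make` if the
    /// claimed slot is still empty. Falls back to a one-off instance
    /// when every slot is locked by another caller.
    fn with_instance<R>(&self, make: impl Fn() -> T, f: impl FnOnce(&mut T) -> R) -> R {
        for slot in &self.slots {
            // `try_lock` never blocks: the first free slot wins.
            if let Ok(mut guard) = slot.try_lock() {
                let instance = guard.get_or_insert_with(&make);
                return f(instance)
            }
        }
        // All slots busy: create a throw-away instance, as the cache
        // above does when its pooled instances are exhausted.
        f(&mut make())
    }
}

fn main() {
    let pool = Pool::new(2);
    let answer = pool.with_instance(|| 40u32, |n| { *n += 2; *n });
    assert_eq!(answer, 42);
}
```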
@@ -213,29 +210,31 @@ impl RuntimeCache { allow_missing_func_imports: bool, f: F, ) -> Result, Error> - where F: FnOnce( + where + F: FnOnce( &Arc, &dyn WasmInstance, Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, + &mut dyn Externalities, + ) -> Result, { let code_hash = &runtime_code.hash; let heap_pages = runtime_code.heap_pages.unwrap_or(default_heap_pages); let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f - let pos = runtimes.iter().position(|r| r.as_ref().map_or( - false, - |r| r.wasm_method == wasm_method && - r.code_hash == *code_hash && - r.heap_pages == heap_pages - )); + let pos = runtimes.iter().position(|r| { + r.as_ref().map_or(false, |r| { + r.wasm_method == wasm_method && + r.code_hash == *code_hash && + r.heap_pages == heap_pages + }) + }); let runtime = match pos { Some(n) => runtimes[n] .clone() .expect("`position` only returns `Some` for entries that are `Some`"), - None => { + None => { let code = runtime_code.fetch_runtime_code().ok_or(WasmError::CodeNotFound)?; #[cfg(not(target_os = "unknown"))] @@ -262,30 +261,29 @@ impl RuntimeCache { result.version, time.elapsed().as_millis(), ); - } + }, Err(ref err) => { log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); - } + }, } Arc::new(result?) - } + }, }; // Rearrange runtimes by last recently used. match pos { Some(0) => {}, - Some(n) => { - for i in (1 .. n + 1).rev() { + Some(n) => + for i in (1..n + 1).rev() { runtimes.swap(i, i - 1); - } - } + }, None => { - runtimes[MAX_RUNTIMES-1] = Some(runtime.clone()); - for i in (1 .. MAX_RUNTIMES).rev() { + runtimes[MAX_RUNTIMES - 1] = Some(runtime.clone()); + for i in (1..MAX_RUNTIMES).rev() { runtimes.swap(i, i - 1); } - } + }, } drop(runtimes); @@ -317,49 +315,48 @@ pub fn create_wasm_runtime_with_code( allow_missing_func_imports, ) .map(|runtime| -> Arc { Arc::new(runtime) }) - } + }, #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => { - sc_executor_wasmtime::create_runtime( - blob, - sc_executor_wasmtime::Config { - heap_pages: heap_pages as u32, - allow_missing_func_imports, - cache_path: cache_path.map(ToOwned::to_owned), - semantics: sc_executor_wasmtime::Semantics { - fast_instance_reuse: true, - deterministic_stack_limit: None, - canonicalize_nans: false, - }, + WasmExecutionMethod::Compiled => sc_executor_wasmtime::create_runtime( + blob, + sc_executor_wasmtime::Config { + heap_pages: heap_pages as u32, + allow_missing_func_imports, + cache_path: cache_path.map(ToOwned::to_owned), + semantics: sc_executor_wasmtime::Semantics { + fast_instance_reuse: true, + deterministic_stack_limit: None, + canonicalize_nans: false, }, - host_functions, - ).map(|runtime| -> Arc { Arc::new(runtime) }) - }, + }, + host_functions, + ) + .map(|runtime| -> Arc { Arc::new(runtime) }), } } fn decode_version(mut version: &[u8]) -> Result { let v: RuntimeVersion = sp_api::OldRuntimeVersion::decode(&mut &version[..]) - .map_err(|_| - WasmError::Instantiation( - "failed to decode \"Core_version\" result using old runtime version".into(), - ) - )?.into(); + .map_err(|_| { + WasmError::Instantiation( + "failed to decode \"Core_version\" result using old runtime version".into(), + ) + })? 
+ .into(); let core_api_id = sp_core::hashing::blake2_64(b"Core"); if v.has_api_with(&core_api_id, |v| v >= 3) { - sp_api::RuntimeVersion::decode(&mut version) - .map_err(|_| - WasmError::Instantiation("failed to decode \"Core_version\" result".into()) - ) + sp_api::RuntimeVersion::decode(&mut version).map_err(|_| { + WasmError::Instantiation("failed to decode \"Core_version\" result".into()) + }) } else { Ok(v) } } fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { - use std::convert::TryFrom; use sp_api::RUNTIME_API_INFO_SIZE; + use std::convert::TryFrom; apis.chunks(RUNTIME_API_INFO_SIZE) .map(|chunk| { @@ -367,9 +364,7 @@ fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { // completely divide by `RUNTIME_API_INFO_SIZE`. <[u8; RUNTIME_API_INFO_SIZE]>::try_from(chunk) .map(sp_api::deserialize_runtime_api_info) - .map_err(|_| { - WasmError::Other("a clipped runtime api info declaration".to_owned()) - }) + .map_err(|_| WasmError::Other("a clipped runtime api info declaration".to_owned())) }) .collect::, WasmError>>() } @@ -379,9 +374,7 @@ fn decode_runtime_apis(apis: &[u8]) -> Result, WasmError> { /// /// If there are no such sections, it returns `None`. If there is an error during decoding those /// sections, `Err` will be returned. -pub fn read_embedded_version( - blob: &RuntimeBlob, -) -> Result, WasmError> { +pub fn read_embedded_version(blob: &RuntimeBlob) -> Result, WasmError> { if let Some(mut version_section) = blob.custom_section_contents("runtime_version") { // We do not use `decode_version` here because the runtime_version section is not supposed // to ever contain a legacy version. Apart from that `decode_version` relies on presence @@ -389,9 +382,7 @@ pub fn read_embedded_version( // the structure found in the `runtime_version` always contain an empty `apis` field. Therefore // the version read will be mistakenly treated as an legacy one. let mut decoded_version = sp_api::RuntimeVersion::decode(&mut version_section) - .map_err(|_| - WasmError::Instantiation("failed to decode version section".into()) - )?; + .map_err(|_| WasmError::Instantiation("failed to decode version section".into()))?; // Don't stop on this and check if there is a special section that encodes all runtime APIs. if let Some(apis_section) = blob.custom_section_contents("runtime_apis") { @@ -443,10 +434,10 @@ fn create_versioned_wasm_runtime( // The following unwind safety assertion is OK because if the method call panics, the // runtime will be dropped. let runtime = AssertUnwindSafe(runtime.as_ref()); - crate::native_executor::with_externalities_safe( - &mut **ext, - move || runtime.new_instance()?.call("Core_version".into(), &[]) - ).map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? + crate::native_executor::with_externalities_safe(&mut **ext, move || { + runtime.new_instance()?.call("Core_version".into(), &[]) + }) + .map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? 
}; if let Ok(version_buf) = version_result { @@ -457,23 +448,16 @@ fn create_versioned_wasm_runtime( let mut instances = Vec::with_capacity(max_instances); instances.resize_with(max_instances, || Mutex::new(None)); - Ok(VersionedRuntime { - code_hash, - module: runtime, - version, - heap_pages, - wasm_method, - instances, - }) + Ok(VersionedRuntime { code_hash, module: runtime, version, heap_pages, wasm_method, instances }) } #[cfg(test)] mod tests { use super::*; - use sp_wasm_interface::HostFunctions; + use codec::Encode; use sp_api::{Core, RuntimeApiInfo}; + use sp_wasm_interface::HostFunctions; use substrate_test_runtime::Block; - use codec::Encode; #[test] fn host_functions_are_equal() { @@ -533,7 +517,8 @@ mod tests { let wasm = sp_maybe_compressed_blob::decompress( substrate_test_runtime::wasm_binary_unwrap(), sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT, - ).expect("Decompressing works"); + ) + .expect("Decompressing works"); let runtime_version = RuntimeVersion { spec_name: "test_replace".into(), @@ -545,10 +530,8 @@ mod tests { transaction_version: 100, }; - let embedded = sp_version::embed::embed_runtime_version( - &wasm, - runtime_version.clone(), - ).expect("Embedding works"); + let embedded = sp_version::embed::embed_runtime_version(&wasm, runtime_version.clone()) + .expect("Embedding works"); let blob = RuntimeBlob::new(&embedded).expect("Embedded blob is valid"); let read_version = read_embedded_version(&blob) diff --git a/substrate/client/executor/wasmi/src/lib.rs b/substrate/client/executor/wasmi/src/lib.rs index 1bafa39494098430213298328678c1a73358c637..d11d867e9a1bfafb46e74ab83d304ecc57fc15a9 100644 --- a/substrate/client/executor/wasmi/src/lib.rs +++ b/substrate/client/executor/wasmi/src/lib.rs @@ -18,25 +18,26 @@ //! This crate provides an implementation of `WasmModule` that is baked by wasmi. 
-use std::{str, cell::RefCell, sync::Arc}; -use wasmi::{ - Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, - FuncInstance, memory_units::Pages, - RuntimeValue::{I32, I64, self}, +use codec::{Decode, Encode}; +use log::{debug, error, trace}; +use sc_executor_common::{ + error::{Error, WasmError}, + runtime_blob::{DataSegmentsSnapshot, RuntimeBlob}, + sandbox, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; -use codec::{Encode, Decode}; use sp_core::sandbox as sandbox_primitives; -use log::{error, trace, debug}; +use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{ - FunctionContext, Pointer, WordSize, Sandbox, MemoryId, Result as WResult, Function, + Function, FunctionContext, MemoryId, Pointer, Result as WResult, Sandbox, WordSize, }; -use sp_runtime_interface::unpack_ptr_and_len; -use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}; -use sc_executor_common::{ - error::{Error, WasmError}, - sandbox, +use std::{cell::RefCell, str, sync::Arc}; +use wasmi::{ + memory_units::Pages, + FuncInstance, ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, + RuntimeValue::{self, I32, I64}, + TableRef, }; -use sc_executor_common::runtime_blob::{RuntimeBlob, DataSegmentsSnapshot}; struct FunctionExecutor<'a> { sandbox_store: sandbox::Store, @@ -109,16 +110,14 @@ impl<'a> FunctionContext for FunctionExecutor<'a> { fn allocate_memory(&mut self, size: WordSize) -> WResult> { let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.allocate(mem, size).map_err(|e| e.to_string()) - }) + self.memory + .with_direct_access_mut(|mem| heap.allocate(mem, size).map_err(|e| e.to_string())) } fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.deallocate(mem, ptr).map_err(|e| e.to_string()) - }) + self.memory + .with_direct_access_mut(|mem| heap.deallocate(mem, ptr).map_err(|e| e.to_string())) } fn sandbox(&mut self) -> &mut dyn Sandbox { @@ -173,11 +172,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { self.sandbox_store.memory_teardown(memory_id).map_err(|e| e.to_string()) } - fn memory_new( - &mut self, - initial: u32, - maximum: u32, - ) -> WResult { + fn memory_new(&mut self, initial: u32, maximum: u32) -> WResult { self.sandbox_store.new_memory(initial, maximum).map_err(|e| e.to_string()) } @@ -213,7 +208,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { self.write_memory(return_val, val).map_err(|_| "Return value buffer is OOB")?; Ok(sandbox_primitives::ERR_OK) }) - } + }, Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), } } @@ -231,9 +226,12 @@ impl<'a> Sandbox for FunctionExecutor<'a> { ) -> WResult { // Extract a dispatch thunk from instance's table by the specified index. let dispatch_thunk = { - let table = self.table.as_ref() + let table = self + .table + .as_ref() .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")?; - table.get(dispatch_thunk_id) + table + .get(dispatch_thunk_id) .map_err(|_| "dispatch_thunk_idx is out of the table bounds")? .ok_or_else(|| "dispatch_thunk_idx points on an empty table entry")? 
}; @@ -248,8 +246,7 @@ impl<'a> Sandbox for FunctionExecutor<'a> { .map(|i| i.register(&mut self.sandbox_store)) { Ok(instance_idx) => instance_idx, - Err(sandbox::InstantiationError::StartTrapped) => - sandbox_primitives::ERR_EXECUTION, + Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, Err(_) => sandbox_primitives::ERR_MODULE, }; @@ -288,7 +285,7 @@ struct Resolver<'a> { impl<'a> Resolver<'a> { fn new( - host_functions: &'a[&'static dyn Function], + host_functions: &'a [&'static dyn Function], allow_missing_func_imports: bool, heap_pages: usize, ) -> Resolver<'a> { @@ -303,25 +300,23 @@ impl<'a> Resolver<'a> { } impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { - fn resolve_func(&self, name: &str, signature: &wasmi::Signature) - -> std::result::Result - { + fn resolve_func( + &self, + name: &str, + signature: &wasmi::Signature, + ) -> std::result::Result { let signature = sp_wasm_interface::Signature::from(signature); for (function_index, function) in self.host_functions.iter().enumerate() { if name == function.name() { if signature == function.signature() { - return Ok( - wasmi::FuncInstance::alloc_host(signature.into(), function_index), - ) + return Ok(wasmi::FuncInstance::alloc_host(signature.into(), function_index)) } else { - return Err(wasmi::Error::Instantiation( - format!( - "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", - function.name(), - signature, - function.signature(), - ), - )) + return Err(wasmi::Error::Instantiation(format!( + "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", + function.name(), + signature, + function.signature(), + ))) } } } @@ -333,9 +328,7 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { Ok(wasmi::FuncInstance::alloc_host(signature.into(), id)) } else { - Err(wasmi::Error::Instantiation( - format!("Export {} not found", name), - )) + Err(wasmi::Error::Instantiation(format!("Export {} not found", name))) } } @@ -346,15 +339,14 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { ) -> Result { if field_name == "memory" { match &mut *self.import_memory.borrow_mut() { - Some(_) => Err(wasmi::Error::Instantiation( - "Memory can not be imported twice!".into(), - )), + Some(_) => + Err(wasmi::Error::Instantiation("Memory can not be imported twice!".into())), memory_ref @ None => { if memory_type - .maximum() - .map(|m| m.saturating_sub(memory_type.initial())) - .map(|m| self.heap_pages > m as usize) - .unwrap_or(false) + .maximum() + .map(|m| m.saturating_sub(memory_type.initial())) + .map(|m| self.heap_pages > m as usize) + .unwrap_or(false) { Err(wasmi::Error::Instantiation(format!( "Heap pages ({}) is greater than imported memory maximum ({}).", @@ -372,35 +364,40 @@ impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { *memory_ref = Some(memory.clone()); Ok(memory) } - } + }, } } else { - Err(wasmi::Error::Instantiation( - format!("Unknown memory reference with name: {}", field_name), - )) + Err(wasmi::Error::Instantiation(format!( + "Unknown memory reference with name: {}", + field_name + ))) } } } impl<'a> wasmi::Externals for FunctionExecutor<'a> { - fn invoke_index(&mut self, index: usize, args: wasmi::RuntimeArgs) - -> Result, wasmi::Trap> - { + fn invoke_index( + &mut self, + index: usize, + args: wasmi::RuntimeArgs, + ) -> Result, wasmi::Trap> { let mut args = args.as_ref().iter().copied().map(Into::into); if let Some(function) = self.host_functions.get(index) { - function.execute(self, &mut args) + function + .execute(self, &mut args) .map_err(|msg| 
Error::FunctionExecution(function.name().to_string(), msg)) .map_err(wasmi::Trap::from) .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports - && index >= self.host_functions.len() - && index < self.host_functions.len() + self.missing_functions.len() + } else if self.allow_missing_func_imports && + index >= self.host_functions.len() && + index < self.host_functions.len() + self.missing_functions.len() { Err(Error::from(format!( "Function `{}` is only a stub. Calling a stub is not allowed.", self.missing_functions[index - self.host_functions.len()], - )).into()) + )) + .into()) } else { Err(Error::from(format!("Could not find host function with index: {}", index)).into()) } @@ -462,25 +459,26 @@ fn call_in_wasm_module( function_executor.write_memory(offset, data)?; let result = match method { - InvokeMethod::Export(method) => { - module_instance.invoke_export( - method, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut function_executor, - ) - }, + InvokeMethod::Export(method) => module_instance.invoke_export( + method, + &[I32(u32::from(offset) as i32), I32(data.len() as i32)], + &mut function_executor, + ), InvokeMethod::Table(func_ref) => { - let func = table.ok_or(Error::NoTable)? + let func = table + .ok_or(Error::NoTable)? .get(func_ref)? .ok_or(Error::NoTableEntryWithIndex(func_ref))?; FuncInstance::invoke( &func, &[I32(u32::from(offset) as i32), I32(data.len() as i32)], &mut function_executor, - ).map_err(Into::into) + ) + .map_err(Into::into) }, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let dispatcher = table.ok_or(Error::NoTable)? + let dispatcher = table + .ok_or(Error::NoTable)? .get(dispatcher_ref)? .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; @@ -488,7 +486,8 @@ fn call_in_wasm_module( &dispatcher, &[I32(func as _), I32(u32::from(offset) as i32), I32(data.len() as i32)], &mut function_executor, - ).map_err(Into::into) + ) + .map_err(Into::into) }, }; @@ -518,15 +517,12 @@ fn instantiate_module( ) -> Result<(ModuleRef, Vec, MemoryRef), Error> { let resolver = Resolver::new(host_functions, allow_missing_func_imports, heap_pages); // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = ModuleInstance::new( - module, - &ImportsBuilder::new().with_resolver("env", &resolver), - )?; + let intermediate_instance = + ModuleInstance::new(module, &ImportsBuilder::new().with_resolver("env", &resolver))?; // Verify that the module has the heap base global variable. let _ = get_heap_base(intermediate_instance.not_started_instance())?; - // Get the memory reference. Runtimes should import memory, but to be backwards // compatible we also support exported memory. let memory = match resolver.import_memory.into_inner() { @@ -541,7 +537,7 @@ fn instantiate_module( memory.grow(Pages(heap_pages)).map_err(|_| Error::Runtime)?; memory - } + }, }; if intermediate_instance.has_start() { @@ -592,9 +588,7 @@ impl GlobalValsSnapshot { // the instance should be the same as used for preserving and // we iterate the same way it as we do it for preserving values that means that the // types should be the same and all the values are mutable. 
So no error is expected/ - global_ref - .set(*global_val) - .map_err(|_| WasmError::ApplySnapshotFailed)?; + global_ref.set(*global_val).map_err(|_| WasmError::ApplySnapshotFailed)?; } Ok(()) } @@ -624,7 +618,8 @@ impl WasmModule for WasmiRuntime { &self.module, &self.host_functions, self.allow_missing_func_imports, - ).map_err(|e| WasmError::Instantiation(e.to_string()))?; + ) + .map_err(|e| WasmError::Instantiation(e.to_string()))?; Ok(Box::new(WasmiInstance { instance, @@ -646,11 +641,11 @@ pub fn create_runtime( host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, ) -> Result { - let data_segments_snapshot = DataSegmentsSnapshot::take(&blob) - .map_err(|e| WasmError::Other(e.to_string()))?; + let data_segments_snapshot = + DataSegmentsSnapshot::take(&blob).map_err(|e| WasmError::Other(e.to_string()))?; - let module = Module::from_parity_wasm_module(blob.into_inner()) - .map_err(|_| WasmError::InvalidModule)?; + let module = + Module::from_parity_wasm_module(blob.into_inner()).map_err(|_| WasmError::InvalidModule)?; let global_vals_snapshot = { let (instance, _, _) = instantiate_module( @@ -734,7 +729,7 @@ impl WasmInstance for WasmiInstance { .as_global() .ok_or_else(|| format!("`{}` is not a global", name))? .get() - .into() + .into(), )), None => Ok(None), } diff --git a/substrate/client/executor/wasmtime/src/host.rs b/substrate/client/executor/wasmtime/src/host.rs index 3f5ac0560a6d79b8634470250d24bdad1959b2f0..ee0e82928db240b0c75a12c7ed8e050464054b72 100644 --- a/substrate/client/executor/wasmtime/src/host.rs +++ b/substrate/client/executor/wasmtime/src/host.rs @@ -19,16 +19,17 @@ //! This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. -use crate::instance_wrapper::InstanceWrapper; -use crate::util; -use std::{cell::RefCell, rc::Rc}; +use crate::{instance_wrapper::InstanceWrapper, util}; +use codec::{Decode, Encode}; use log::trace; -use codec::{Encode, Decode}; use sc_allocator::FreeingBumpHeapAllocator; -use sc_executor_common::error::Result; -use sc_executor_common::sandbox::{self, SandboxCapabilities, SupervisorFuncIndex}; +use sc_executor_common::{ + error::Result, + sandbox::{self, SandboxCapabilities, SupervisorFuncIndex}, +}; use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; +use std::{cell::RefCell, rc::Rc}; use wasmtime::{Func, Val}; /// Wrapper type for pointer to a Wasm table entry. 
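The wasmi hunks above thread two snapshots through instantiation: `DataSegmentsSnapshot::take` captures the initial linear-memory segments and `GlobalValsSnapshot` the mutable globals, so an instance can be reset between calls instead of rebuilt from scratch. A toy illustration of the snapshot-and-restore idea (`InstanceState` and `Snapshot` are stand-ins invented for this sketch; the real snapshots are finer-grained and live in `sc-executor-common`):

```rust
/// Stand-in for a wasm instance's mutable state: globals plus linear memory.
#[derive(Clone, Debug, PartialEq)]
struct InstanceState {
    globals: Vec<i64>,
    memory: Vec<u8>,
}

/// Captured once at instantiation; cheap to reapply before each call.
struct Snapshot {
    pristine: InstanceState,
}

impl Snapshot {
    fn take(state: &InstanceState) -> Self {
        Snapshot { pristine: state.clone() }
    }

    /// Restore the pristine state, mirroring what `GlobalValsSnapshot::apply`
    /// and `DataSegmentsSnapshot` reapplication do in the hunks above.
    fn apply(&self, state: &mut InstanceState) {
        *state = self.pristine.clone();
    }
}

fn main() {
    let mut state = InstanceState { globals: vec![0, 1], memory: vec![0; 16] };
    let snapshot = Snapshot::take(&state);

    // A call dirties globals and linear memory...
    state.globals[0] = 99;
    state.memory[3] = 7;

    // ...and the snapshot resets the instance for the next call,
    // avoiding a full re-instantiation of the module.
    snapshot.apply(&mut state);
    assert_eq!(state, InstanceState { globals: vec![0, 1], memory: vec![0; 16] });
}
```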
@@ -108,7 +109,7 @@ impl<'a> SandboxCapabilities for HostContext<'a> { "Supervisor function returned {} results, expected 1", ret_vals.len() ) - .into()); + .into()) } else { &ret_vals[0] }; @@ -116,9 +117,9 @@ impl<'a> SandboxCapabilities for HostContext<'a> { if let Some(ret_val) = ret_val.i64() { Ok(ret_val) } else { - return Err("Supervisor function returned unexpected result!".into()); + return Err("Supervisor function returned unexpected result!".into()) } - } + }, Err(err) => Err(err.to_string().into()), } } @@ -130,15 +131,11 @@ impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { address: Pointer, dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { - self.instance - .read_memory_into(address, dest) - .map_err(|e| e.to_string()) + self.instance.read_memory_into(address, dest).map_err(|e| e.to_string()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.instance - .write_memory_from(address, data) - .map_err(|e| e.to_string()) + self.instance.write_memory_from(address, data).map_err(|e| e.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { @@ -166,11 +163,8 @@ impl<'a> Sandbox for HostContext<'a> { buf_ptr: Pointer, buf_len: WordSize, ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; sandboxed_memory.with_direct_access(|sandboxed_memory| { let len = buf_len as usize; let src_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) @@ -200,11 +194,8 @@ impl<'a> Sandbox for HostContext<'a> { val_ptr: Pointer, val_len: WordSize, ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; + let sandboxed_memory = + self.sandbox_store.borrow().memory(memory_id).map_err(|e| e.to_string())?; sandboxed_memory.with_direct_access_mut(|sandboxed_memory| { let len = val_len as usize; let supervisor_mem_size = self.instance.memory_size() as usize; @@ -259,11 +250,8 @@ impl<'a> Sandbox for HostContext<'a> { .map(Into::into) .collect::>(); - let instance = self - .sandbox_store - .borrow() - .instance(instance_id) - .map_err(|e| e.to_string())?; + let instance = + self.sandbox_store.borrow().instance(instance_id).map_err(|e| e.to_string())?; let result = instance.invoke(export_name, &args, self, state); match result { @@ -278,7 +266,7 @@ impl<'a> Sandbox for HostContext<'a> { .map_err(|_| "can't write return value")?; Ok(sandbox_primitives::ERR_OK) }) - } + }, Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), } } diff --git a/substrate/client/executor/wasmtime/src/imports.rs b/substrate/client/executor/wasmtime/src/imports.rs index f66e3042fba52af7b55f130b8d1eec1db106052f..0e5094db51195fa20180b11b7f64d36f0487b8d3 100644 --- a/substrate/client/executor/wasmtime/src/imports.rs +++ b/substrate/client/executor/wasmtime/src/imports.rs @@ -21,8 +21,8 @@ use sc_executor_common::error::WasmError; use sp_wasm_interface::{Function, ValueType}; use std::any::Any; use wasmtime::{ - Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, - Trap, Val, Store, + Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, Store, + Trap, Val, }; pub struct Imports { @@ -51,36 +51,29 @@ pub fn resolve_imports( "host doesn't provide any imports from non-env 
module: {}:{}", import_ty.module(), name, - ))); + ))) } let resolved = match name { "memory" => { memory_import_index = Some(externs.len()); resolve_memory_import(store, &import_ty, heap_pages)? - } - _ => resolve_func_import( - store, - &import_ty, - host_functions, - allow_missing_func_imports, - )?, + }, + _ => + resolve_func_import(store, &import_ty, host_functions, allow_missing_func_imports)?, }; externs.push(resolved); } - Ok(Imports { - memory_import_index, - externs, - }) + Ok(Imports { memory_import_index, externs }) } /// When the module linking proposal is supported the import's name can be `None`. /// Because we are not using this proposal we could safely unwrap the name. /// However, we opt for an error in order to avoid panics at all costs. fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmError> { - let name = import.name().ok_or_else(|| + let name = import.name().ok_or_else(|| { WasmError::Other("The module linking proposal is not supported.".to_owned()) - )?; + })?; Ok(name) } @@ -91,21 +84,17 @@ fn resolve_memory_import( ) -> Result { let requested_memory_ty = match import_ty.ty() { ExternType::Memory(memory_ty) => memory_ty, - _ => { + _ => return Err(WasmError::Other(format!( "this import must be of memory type: {}:{}", import_ty.module(), import_name(&import_ty)?, - ))) - } + ))), }; // Increment the min (a.k.a initial) number of pages by `heap_pages` and check if it exceeds the // maximum specified by the import. - let initial = requested_memory_ty - .limits() - .min() - .saturating_add(heap_pages); + let initial = requested_memory_ty.limits().min().saturating_add(heap_pages); if let Some(max) = requested_memory_ty.limits().max() { if initial > max { return Err(WasmError::Other(format!( @@ -113,7 +102,7 @@ fn resolve_memory_import( by the runtime wasm module {}", initial, max, - ))); + ))) } } @@ -137,37 +126,31 @@ fn resolve_func_import( let func_ty = match import_ty.ty() { ExternType::Func(func_ty) => func_ty, - _ => { + _ => return Err(WasmError::Other(format!( "host doesn't provide any non function imports besides 'memory': {}:{}", import_ty.module(), name, - ))); - } + ))), }; - let host_func = match host_functions - .iter() - .find(|host_func| host_func.name() == name) - { + let host_func = match host_functions.iter().find(|host_func| host_func.name() == name) { Some(host_func) => host_func, - None if allow_missing_func_imports => { - return Ok(MissingHostFuncHandler::new(import_ty)?.into_extern(store, &func_ty)); - } - None => { + None if allow_missing_func_imports => + return Ok(MissingHostFuncHandler::new(import_ty)?.into_extern(store, &func_ty)), + None => return Err(WasmError::Other(format!( "host doesn't provide such function: {}:{}", import_ty.module(), name, - ))); - } + ))), }; if &func_ty != &wasmtime_func_sig(*host_func) { return Err(WasmError::Other(format!( "signature mismatch for: {}:{}", import_ty.module(), name, - ))); + ))) } Ok(HostFuncHandler::new(*host_func).into_extern(store)) @@ -218,7 +201,7 @@ fn call_static( ); wasmtime_results[0] = util::into_wasmtime_val(ret_val); Ok(()) - } + }, Ok(None) => { debug_assert!( wasmtime_results.len() == 0, @@ -226,26 +209,22 @@ fn call_static( correspond to the number of results returned by the host function", ); Ok(()) - } + }, Err(msg) => Err(Trap::new(msg)), } } impl HostFuncHandler { fn new(host_func: &'static dyn Function) -> Self { - Self { - host_func, - } + Self { host_func } } fn into_extern(self, store: &Store) -> Extern { let host_func = self.host_func; let func_ty 
= wasmtime_func_sig(self.host_func); - let func = Func::new(store, func_ty, - move |_, params, result| { - call_static(host_func, params, result) - } - ); + let func = Func::new(store, func_ty, move |_, params, result| { + call_static(host_func, params, result) + }); Extern::Func(func) } } @@ -266,28 +245,17 @@ impl MissingHostFuncHandler { fn into_extern(self, store: &Store, func_ty: &FuncType) -> Extern { let Self { module, name } = self; - let func = Func::new(store, func_ty.clone(), - move |_, _, _| Err(Trap::new(format!( - "call to a missing function {}:{}", - module, name - ))) - ); + let func = Func::new(store, func_ty.clone(), move |_, _, _| { + Err(Trap::new(format!("call to a missing function {}:{}", module, name))) + }); Extern::Func(func) } } fn wasmtime_func_sig(func: &dyn Function) -> wasmtime::FuncType { let signature = func.signature(); - let params = signature - .args - .iter() - .cloned() - .map(into_wasmtime_val_type); - let results = signature - .return_value - .iter() - .cloned() - .map(into_wasmtime_val_type); + let params = signature.args.iter().cloned().map(into_wasmtime_val_type); + let results = signature.return_value.iter().cloned().map(into_wasmtime_val_type); wasmtime::FuncType::new(params, results) } diff --git a/substrate/client/executor/wasmtime/src/instance_wrapper.rs b/substrate/client/executor/wasmtime/src/instance_wrapper.rs index 816099aee8049f07ad749fb10df21bab8fc77429..80cf2b60f49243a68bcc01822d56a6ee79e98b28 100644 --- a/substrate/client/executor/wasmtime/src/instance_wrapper.rs +++ b/substrate/client/executor/wasmtime/src/instance_wrapper.rs @@ -19,26 +19,23 @@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. -use crate::util; -use crate::imports::Imports; +use crate::{imports::Imports, util}; -use std::{slice, marker}; use sc_executor_common::{ error::{Error, Result}, runtime_blob, wasm_runtime::InvokeMethod, }; -use sp_wasm_interface::{Pointer, WordSize, Value}; -use wasmtime::{Instance, Module, Memory, Table, Val, Func, Extern, Global, Store}; +use sp_wasm_interface::{Pointer, Value, WordSize}; +use std::{marker, slice}; +use wasmtime::{Extern, Func, Global, Instance, Memory, Module, Store, Table, Val}; /// Invoked entrypoint format. pub enum EntryPointType { /// Direct call. /// /// Call is made by providing only payload reference and length. - Direct { - entrypoint: wasmtime::TypedFunc<(u32, u32), u64>, - }, + Direct { entrypoint: wasmtime::TypedFunc<(u32, u32), u64> }, /// Indirect call. /// /// Call is made by providing payload reference and length, and extra argument @@ -66,17 +63,10 @@ impl EntryPoint { } match self.call_type { - EntryPointType::Direct { ref entrypoint } => { - entrypoint.call((data_ptr, data_len)).map_err(handle_trap) - } - EntryPointType::Wrapped { - func, - ref dispatcher, - } => { - dispatcher - .call((func, data_ptr, data_len)) - .map_err(handle_trap) - } + EntryPointType::Direct { ref entrypoint } => + entrypoint.call((data_ptr, data_len)).map_err(handle_trap), + EntryPointType::Wrapped { func, ref dispatcher } => + dispatcher.call((func, data_ptr, data_len)).map_err(handle_trap), } } @@ -85,9 +75,7 @@ impl EntryPoint { .typed::<(u32, u32), u64>() .map_err(|_| "Invalid signature for direct entry point")? 
.clone(); - Ok(Self { - call_type: EntryPointType::Direct { entrypoint }, - }) + Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) } pub fn wrapped( @@ -98,9 +86,7 @@ impl EntryPoint { .typed::<(u32, u32, u32), u64>() .map_err(|_| "Invalid signature for wrapped entry point")? .clone(); - Ok(Self { - call_type: EntryPointType::Wrapped { func, dispatcher }, - }) + Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) } } @@ -127,7 +113,6 @@ fn extern_memory(extern_: &Extern) -> Option<&Memory> { } } - fn extern_global(extern_: &Extern) -> Option<&Global> { match extern_ { Extern::Global(glob) => Some(glob), @@ -156,15 +141,13 @@ impl InstanceWrapper { .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { - Some(memory_idx) => { - extern_memory(&imports.externs[memory_idx]) - .expect("only memory can be at the `memory_idx`; qed") - .clone() - } + Some(memory_idx) => extern_memory(&imports.externs[memory_idx]) + .expect("only memory can be at the `memory_idx`; qed") + .clone(), None => { let memory = get_linear_memory(&instance)?; if !memory.grow(heap_pages).is_ok() { - return Err("failed top increase the linear memory size".into()); + return Err("failed top increase the linear memory size".into()) } memory }, @@ -186,42 +169,38 @@ impl InstanceWrapper { Ok(match method { InvokeMethod::Export(method) => { // Resolve the requested method and verify that it has a proper signature. - let export = self - .instance - .get_export(method) - .ok_or_else(|| Error::from(format!("Exported method {} is not found", method)))?; + let export = self.instance.get_export(method).ok_or_else(|| { + Error::from(format!("Exported method {} is not found", method)) + })?; let func = extern_func(&export) .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? .clone(); - EntryPoint::direct(func) - .map_err(|_| - Error::from(format!( - "Exported function '{}' has invalid signature.", - method, - )) - )? + EntryPoint::direct(func).map_err(|_| { + Error::from(format!("Exported function '{}' has invalid signature.", method,)) + })? }, InvokeMethod::Table(func_ref) => { - let table = self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(func_ref) - .ok_or(Error::NoTableEntryWithIndex(func_ref))?; + let table = + self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let val = table.get(func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? .ok_or(Error::FunctionRefIsNull(func_ref))? .clone(); - EntryPoint::direct(func) - .map_err(|_| - Error::from(format!( - "Function @{} in exported table has invalid signature for direct call.", - func_ref, - )) - )? - }, + EntryPoint::direct(func).map_err(|_| { + Error::from(format!( + "Function @{} in exported table has invalid signature for direct call.", + func_ref, + )) + })? + }, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(dispatcher_ref) + let table = + self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let val = table + .get(dispatcher_ref) .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; let dispatcher = val .funcref() @@ -229,13 +208,12 @@ impl InstanceWrapper { .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? 
.clone(); - EntryPoint::wrapped(dispatcher, func) - .map_err(|_| - Error::from(format!( - "Function @{} in exported table has invalid signature for wrapped call.", - dispatcher_ref, - )) - )? + EntryPoint::wrapped(dispatcher, func).map_err(|_| { + Error::from(format!( + "Function @{} in exported table has invalid signature for wrapped call.", + dispatcher_ref, + )) + })? }, }) } @@ -426,7 +404,7 @@ impl InstanceWrapper { /// relied upon. Thus this function acts as a hint. pub fn decommit(&self) { if self.memory.data_size() == 0 { - return; + return } cfg_if::cfg_if! { diff --git a/substrate/client/executor/wasmtime/src/lib.rs b/substrate/client/executor/wasmtime/src/lib.rs index 74b1150f06aeaa211164b3931322105ed775b287..62b0b205f6de658ad4ba56c7a3693d57d7f4a840 100644 --- a/substrate/client/executor/wasmtime/src/lib.rs +++ b/substrate/client/executor/wasmtime/src/lib.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -///! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +/// ! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. mod host; mod imports; mod instance_wrapper; @@ -28,6 +28,6 @@ mod util; mod tests; pub use runtime::{ - create_runtime, create_runtime_from_artifact, prepare_runtime_artifact, Config, Semantics, - DeterministicStackLimit, + create_runtime, create_runtime_from_artifact, prepare_runtime_artifact, Config, + DeterministicStackLimit, Semantics, }; diff --git a/substrate/client/executor/wasmtime/src/runtime.rs b/substrate/client/executor/wasmtime/src/runtime.rs index 0a3c0488a247df1d07cdb33bcb5f26c23c8cb8ba..b69eac6266bb1c8939a17296444c705a3483a9b6 100644 --- a/substrate/client/executor/wasmtime/src/runtime.rs +++ b/substrate/client/executor/wasmtime/src/runtime.rs @@ -18,22 +18,26 @@ //! Defines the compiled Wasm runtime that uses Wasmtime internally. -use crate::host::HostState; -use crate::imports::{Imports, resolve_imports}; -use crate::instance_wrapper::{InstanceWrapper, EntryPoint}; -use crate::state_holder; - -use std::{path::PathBuf, rc::Rc}; -use std::sync::Arc; -use std::path::Path; +use crate::{ + host::HostState, + imports::{resolve_imports, Imports}, + instance_wrapper::{EntryPoint, InstanceWrapper}, + state_holder, +}; + +use sc_allocator::FreeingBumpHeapAllocator; use sc_executor_common::{ error::{Result, WasmError}, runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, - wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}, + wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; -use sc_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{Function, Pointer, WordSize, Value}; +use sp_wasm_interface::{Function, Pointer, Value, WordSize}; +use std::{ + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, +}; use wasmtime::{Engine, Store}; enum Strategy { @@ -102,7 +106,8 @@ impl WasmModule for WasmtimeRuntime { // the mutable globals were collected. Here, it is easy to see that there is only a single // runtime blob and thus it's the same that was used for both creating the instance and // collecting the mutable globals. 
- let globals_snapshot = GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); + let globals_snapshot = + GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); Strategy::FastInstanceReuse { instance_wrapper: Rc::new(instance_wrapper), @@ -150,14 +155,15 @@ impl WasmInstance for WasmtimeInstance { globals_snapshot.apply(&**instance_wrapper); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); + let result = + perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. instance_wrapper.decommit(); result - } + }, Strategy::RecreateInstance(instance_creator) => { let instance_wrapper = instance_creator.instantiate()?; let heap_base = instance_wrapper.extract_heap_base()?; @@ -165,18 +171,16 @@ impl WasmInstance for WasmtimeInstance { let allocator = FreeingBumpHeapAllocator::new(heap_base); perform_call(data, Rc::new(instance_wrapper), entrypoint, allocator) - } + }, } } fn get_global_const(&self, name: &str) -> Result> { match &self.strategy { - Strategy::FastInstanceReuse { - instance_wrapper, .. - } => instance_wrapper.get_global_val(name), - Strategy::RecreateInstance(instance_creator) => { - instance_creator.instantiate()?.get_global_val(name) - } + Strategy::FastInstanceReuse { instance_wrapper, .. } => + instance_wrapper.get_global_val(name), + Strategy::RecreateInstance(instance_creator) => + instance_creator.instantiate()?.get_global_val(name), } } @@ -186,10 +190,9 @@ impl WasmInstance for WasmtimeInstance { // We do not keep the wasm instance around, therefore there is no linear memory // associated with it. None - } - Strategy::FastInstanceReuse { - instance_wrapper, .. - } => Some(instance_wrapper.base_ptr()), + }, + Strategy::FastInstanceReuse { instance_wrapper, .. } => + Some(instance_wrapper.base_ptr()), } } } @@ -237,9 +240,8 @@ fn common_config(semantics: &Semantics) -> std::result::Result, ) -> std::result::Result { - do_create_runtime( - CodeSupplyMode::Artifact { compiled_artifact }, - config, - host_functions, - ) + do_create_runtime(CodeSupplyMode::Artifact { compiled_artifact }, config, host_functions) } /// # Safety @@ -456,16 +454,13 @@ unsafe fn do_create_runtime( let module = wasmtime::Module::new(&engine, &blob.serialize()) .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; - (module, Some(InstanceSnapshotData { - data_segments_snapshot, - mutable_globals, - })) + (module, Some(InstanceSnapshotData { data_segments_snapshot, mutable_globals })) } else { let module = wasmtime::Module::new(&engine, &blob.serialize()) .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; (module, None) } - } + }, CodeSupplyMode::Artifact { compiled_artifact } => { // SAFETY: The unsafity of `deserialize` is covered by this function. The // responsibilities to maintain the invariants are passed to the caller. 
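The `Strategy` match being reflowed in `runtime.rs` encodes wasmtime's two execution modes: keep one instance alive, resetting its state before each call and decommitting linear memory afterwards, or recreate an instance per call. A compact sketch of that dispatch shape (the `Instance` type and its `reset`/`decommit` methods are placeholders for this sketch, not wasmtime API):

```rust
struct Instance {
    calls: u32,
}

impl Instance {
    fn new() -> Self {
        Instance { calls: 0 }
    }
    /// Placeholder for reapplying snapshots before a reused call.
    fn reset(&mut self) {}
    /// Placeholder for hinting the OS to reclaim linear memory.
    fn decommit(&self) {}
    fn call(&mut self, data: &[u8]) -> Vec<u8> {
        self.calls += 1;
        data.to_vec()
    }
}

enum Strategy {
    /// One long-lived instance; state is reset between calls.
    FastInstanceReuse(Instance),
    /// A fresh instance per call; nothing survives between calls.
    RecreateInstance,
}

impl Strategy {
    fn call(&mut self, data: &[u8]) -> Vec<u8> {
        match self {
            Strategy::FastInstanceReuse(instance) => {
                instance.reset();
                let result = instance.call(data);
                // Let the OS reclaim memory until the next call.
                instance.decommit();
                result
            },
            Strategy::RecreateInstance => Instance::new().call(data),
        }
    }
}

fn main() {
    let mut fast = Strategy::FastInstanceReuse(Instance::new());
    assert_eq!(fast.call(b"ping"), b"ping".to_vec());
}
```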
@@ -473,16 +468,10 @@ unsafe fn do_create_runtime( .map_err(|e| WasmError::Other(format!("cannot deserialize module: {}", e)))?; (module, None) - } + }, }; - Ok(WasmtimeRuntime { - module: Arc::new(module), - snapshot_data, - config, - host_functions, - engine, - }) + Ok(WasmtimeRuntime { module: Arc::new(module), snapshot_data, config, host_functions, engine }) } fn instrument( diff --git a/substrate/client/executor/wasmtime/src/tests.rs b/substrate/client/executor/wasmtime/src/tests.rs index 4066a44194a132d5fc8bf21309420435ce958719..7933578b804995fe9e0cee6531c3891638df4d4d 100644 --- a/substrate/client/executor/wasmtime/src/tests.rs +++ b/substrate/client/executor/wasmtime/src/tests.rs @@ -16,12 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sc_executor_common::{ - runtime_blob::RuntimeBlob, - wasm_runtime::WasmModule, -}; +use codec::{Decode as _, Encode as _}; +use sc_executor_common::{runtime_blob::RuntimeBlob, wasm_runtime::WasmModule}; use sc_runtime_test::wasm_binary_unwrap; -use codec::{Encode as _, Decode as _}; use std::sync::Arc; type HostFunctions = sp_io::SubstrateHostFunctions; @@ -68,7 +65,7 @@ impl RuntimeBuilder { Some(wat) => { wasm = wat::parse_str(wat).unwrap(); &wasm - } + }, }; RuntimeBlob::uncompress_if_needed(&wasm) @@ -83,21 +80,20 @@ impl RuntimeBuilder { cache_path: None, semantics: crate::Semantics { fast_instance_reuse: self.fast_instance_reuse, - deterministic_stack_limit: - match self.deterministic_stack { - true => Some(crate::DeterministicStackLimit { - logical_max: 65536, - native_stack_max: 256 * 1024 * 1024, - }), - false => None, - }, + deterministic_stack_limit: match self.deterministic_stack { + true => Some(crate::DeterministicStackLimit { + logical_max: 65536, + native_stack_max: 256 * 1024 * 1024, + }), + false => None, + }, canonicalize_nans: self.canonicalize_nans, }, }, { use sp_wasm_interface::HostFunctions as _; HostFunctions::host_functions() - } + }, ) .expect("cannot create runtime"); @@ -113,9 +109,7 @@ fn test_nan_canonicalization() { builder.build() }; - let instance = runtime - .new_instance() - .expect("failed to instantiate a runtime"); + let instance = runtime.new_instance().expect("failed to instantiate a runtime"); /// A NaN with canonical payload bits. const CANONICAL_NAN_BITS: u32 = 0x7fc00000; @@ -142,10 +136,7 @@ fn test_nan_canonicalization() { let params = (u32::to_le_bytes(ARBITRARY_NAN_BITS), u32::to_le_bytes(1)).encode(); let res = { - let raw_result = instance.call_export( - "test_fp_f32add", - &params, - ).unwrap(); + let raw_result = instance.call_export("test_fp_f32add", &params).unwrap(); u32::from_le_bytes(<[u8; 4]>::decode(&mut &raw_result[..]).unwrap()) }; assert_eq!(res, CANONICAL_NAN_BITS); @@ -161,9 +152,7 @@ fn test_stack_depth_reaching() { builder.deterministic_stack(true); builder.build() }; - let instance = runtime - .new_instance() - .expect("failed to instantiate a runtime"); + let instance = runtime.new_instance().expect("failed to instantiate a runtime"); let err = instance.call_export("test-many-locals", &[]).unwrap_err(); diff --git a/substrate/client/finality-grandpa-warp-sync/src/lib.rs b/substrate/client/finality-grandpa-warp-sync/src/lib.rs index c0ef93e625fd8ef230f1f174ead5dea8af407071..c74c4d15f9f458b732440e2952bdfd12bb8b270d 100644 --- a/substrate/client/finality-grandpa-warp-sync/src/lib.rs +++ b/substrate/client/finality-grandpa-warp-sync/src/lib.rs @@ -17,17 +17,20 @@ //! Helper for handling (i.e.
answering) grandpa warp sync requests from a remote peer. use codec::{Decode, Encode}; -use sc_network::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; -use sc_client_api::Backend; -use sp_runtime::traits::NumberFor; -use futures::channel::{mpsc, oneshot}; -use futures::stream::StreamExt; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; use log::debug; -use sp_runtime::traits::Block as BlockT; -use std::time::Duration; -use std::sync::Arc; -use sc_service::{SpawnTaskHandle, config::{Configuration, Role}}; +use sc_client_api::Backend; use sc_finality_grandpa::SharedAuthoritySet; +use sc_network::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; +use sc_service::{ + config::{Configuration, Role}, + SpawnTaskHandle, +}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::{sync::Arc, time::Duration}; mod proof; @@ -50,11 +53,8 @@ where generate_request_response_config(protocol_id.clone()) } else { // Allow both outgoing and incoming requests. - let (handler, request_response_config) = GrandpaWarpSyncRequestHandler::new( - protocol_id.clone(), - backend.clone(), - authority_set, - ); + let (handler, request_response_config) = + GrandpaWarpSyncRequestHandler::new(protocol_id.clone(), backend.clone(), authority_set); spawn_handle.spawn("grandpa-warp-sync", handler.run()); request_response_config } @@ -108,12 +108,7 @@ impl> GrandpaWarpSyncRequestHandler> GrandpaWarpSyncRequestHandler, pending_response: oneshot::Sender, ) -> Result<(), HandleRequestError> - where NumberFor: sc_finality_grandpa::BlockNumberOps, + where + NumberFor: sc_finality_grandpa::BlockNumberOps, { let request = Request::::decode(&mut &payload[..])?; @@ -133,26 +129,29 @@ impl> GrandpaWarpSyncRequestHandler: sc_finality_grandpa::BlockNumberOps, + where + NumberFor: sc_finality_grandpa::BlockNumberOps, { while let Some(request) = self.request_receiver.next().await { let IncomingRequest { peer, payload, pending_response } = request; match self.handle_request(payload, pending_response) { - Ok(()) => debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer), + Ok(()) => + debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer), Err(e) => debug!( target: LOG_TARGET, - "Failed to handle grandpa warp sync request from {}: {}", - peer, e, + "Failed to handle grandpa warp sync request from {}: {}", peer, e, ), } } diff --git a/substrate/client/finality-grandpa-warp-sync/src/proof.rs b/substrate/client/finality-grandpa-warp-sync/src/proof.rs index 87a62202678279ff3ee84a802113d6e9cbf40b68..d2484a800e63b318084d58b9e1d30fa67e03d488 100644 --- a/substrate/client/finality-grandpa-warp-sync/src/proof.rs +++ b/substrate/client/finality-grandpa-warp-sync/src/proof.rs @@ -72,7 +72,7 @@ impl WarpSyncProof { if begin_number > blockchain.info().finalized_number { return Err(HandleRequestError::InvalidRequest( "Start block is not finalized".to_string(), - )); + )) } let canon_hash = blockchain.hash(begin_number)?.expect( @@ -84,15 +84,15 @@ impl WarpSyncProof { if canon_hash != begin { return Err(HandleRequestError::InvalidRequest( "Start block is not in the finalized chain".to_string(), - )); + )) } let mut proofs = Vec::new(); let mut proofs_encoded_len = 0; let mut proof_limit_reached = false; - let set_changes = set_changes.iter_from(begin_number) - .ok_or(HandleRequestError::MissingData)?; + let set_changes = + set_changes.iter_from(begin_number).ok_or(HandleRequestError::MissingData)?; for (_, last_block) 
in set_changes { let header = blockchain.header(BlockId::Number(*last_block))?.expect( @@ -105,7 +105,7 @@ impl WarpSyncProof { // if it doesn't contain a signal for standard change then the set must have changed // through a forced changed, in which case we stop collecting proofs as the chain of // trust in authority handoffs was broken. - break; + break } let justification = blockchain @@ -119,10 +119,7 @@ impl WarpSyncProof { let justification = GrandpaJustification::::decode(&mut &justification[..])?; - let proof = WarpSyncFragment { - header: header.clone(), - justification, - }; + let proof = WarpSyncFragment { header: header.clone(), justification }; let proof_size = proof.encoded_size(); // Check for the limit. We remove some bytes from the maximum size, because we're only @@ -130,7 +127,7 @@ impl WarpSyncProof { // room for rest of the data (the size of the `Vec` and the boolean). if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { proof_limit_reached = true; - break; + break } proofs_encoded_len += proof_size; @@ -158,19 +155,13 @@ impl WarpSyncProof { let header = blockchain.header(BlockId::Hash(latest_justification.target().1))? .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); - proofs.push(WarpSyncFragment { - header, - justification: latest_justification, - }) + proofs.push(WarpSyncFragment { header, justification: latest_justification }) } true }; - let final_outcome = WarpSyncProof { - proofs, - is_finished, - }; + let final_outcome = WarpSyncProof { proofs, is_finished }; debug_assert!(final_outcome.encoded_size() <= MAX_WARP_SYNC_PROOF_SIZE); Ok(final_outcome) } @@ -196,8 +187,8 @@ impl WarpSyncProof { if proof.justification.target().1 != proof.header.hash() { return Err(HandleRequestError::InvalidProof( - "mismatch between header and justification".to_owned() - )); + "mismatch between header and justification".to_owned(), + )) } if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { @@ -208,7 +199,7 @@ impl WarpSyncProof { // set change. 
return Err(HandleRequestError::InvalidProof( "Header is missing authority set change digest".to_string(), - )); + )) } } @@ -249,12 +240,7 @@ mod tests { let mut authority_set_changes = Vec::new(); for n in 1..=100 { - let mut block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let mut block = client.new_block(Default::default()).unwrap().build().unwrap().block; let mut new_authorities = None; @@ -277,10 +263,7 @@ let digest = sp_runtime::generic::DigestItem::Consensus( sp_finality_grandpa::GRANDPA_ENGINE_ID, sp_finality_grandpa::ConsensusLog::ScheduledChange( - sp_finality_grandpa::ScheduledChange { - delay: 0u64, - next_authorities, - }, + sp_finality_grandpa::ScheduledChange { delay: 0u64, next_authorities }, ) .encode(), ); @@ -300,10 +283,7 @@ let mut precommits = Vec::new(); for keyring in &current_authorities { - let precommit = finality_grandpa::Precommit { - target_hash, - target_number, - }; + let precommit = finality_grandpa::Precommit { target_hash, target_number }; let msg = finality_grandpa::Message::Precommit(precommit.clone()); let encoded = sp_finality_grandpa::localized_payload(42, current_set_id, &msg); @@ -318,18 +298,14 @@ precommits.push(precommit); } - let commit = finality_grandpa::Commit { - target_hash, - target_number, - precommits, - }; + let commit = finality_grandpa::Commit { target_hash, target_number, precommits }; let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); client .finalize_block( BlockId::Hash(target_hash), - Some((GRANDPA_ENGINE_ID, justification.encode())) + Some((GRANDPA_ENGINE_ID, justification.encode())), ) .unwrap(); diff --git a/substrate/client/finality-grandpa/rpc/src/finality.rs b/substrate/client/finality-grandpa/rpc/src/finality.rs index cfd8f68e5ce607683088fb7e0cf41c3dc46db1c7..62e3502fc7180d30e974bdf4d5c54c44672d43ec 100644 --- a/substrate/client/finality-grandpa/rpc/src/finality.rs +++ b/substrate/client/finality-grandpa/rpc/src/finality.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sc_finality_grandpa::FinalityProofProvider; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -44,7 +44,6 @@ where &self, block: NumberFor, ) -> Result, sc_finality_grandpa::FinalityProofError> { - self.prove_finality(block) - .map(|x| x.map(|y| EncodedFinalityProof(y.into()))) + self.prove_finality(block).map(|x| x.map(|y| EncodedFinalityProof(y.into()))) } } diff --git a/substrate/client/finality-grandpa/rpc/src/lib.rs b/substrate/client/finality-grandpa/rpc/src/lib.rs index 2e7354e5fda6852da3d3301f5933c627a5b6a2d1..42d8630d10f832c34c6558640f0b63a1b3576d01 100644 --- a/substrate/client/finality-grandpa/rpc/src/lib.rs +++ b/substrate/client/finality-grandpa/rpc/src/lib.rs @@ -19,17 +19,16 @@ //! RPC API for GRANDPA.
#![warn(missing_docs)] -use std::sync::Arc; -use futures::{FutureExt, TryFutureExt, TryStreamExt, StreamExt}; -use log::warn; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use jsonrpc_core::futures::{ + future::{Executor as Executor01, Future as Future01}, sink::Sink as Sink01, stream::Stream as Stream01, - future::Future as Future01, - future::Executor as Executor01, }; +use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use log::warn; +use std::sync::Arc; mod error; mod finality; @@ -40,8 +39,8 @@ use sc_finality_grandpa::GrandpaJustificationStream; use sp_runtime::traits::{Block as BlockT, NumberFor}; use finality::{EncodedFinalityProof, RpcFinalityProofProvider}; -use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; use notification::JustificationNotification; +use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; type FutureResult = Box + Send>; @@ -67,7 +66,7 @@ pub trait GrandpaApi { fn subscribe_justifications( &self, metadata: Self::Metadata, - subscriber: Subscriber + subscriber: Subscriber, ); /// Unsubscribe from receiving notifications about recently finalized blocks. @@ -79,16 +78,13 @@ pub trait GrandpaApi { fn unsubscribe_justifications( &self, metadata: Option, - id: SubscriptionId + id: SubscriptionId, ) -> jsonrpc_core::Result; /// Prove finality for the given block number by returning the Justification for the last block /// in the set and all the intermediary headers to link them together. #[rpc(name = "grandpa_proveFinality")] - fn prove_finality( - &self, - block: Number, - ) -> FutureResult>; + fn prove_finality(&self, block: Number) -> FutureResult>; } /// Implements the GrandpaApi RPC trait for interacting with GRANDPA. 
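A few files back, `WarpSyncProof::generate` stops accumulating fragments once their combined encoded size approaches `MAX_WARP_SYNC_PROOF_SIZE`, reserving roughly 50 bytes for the container's own encoding (the `Vec` length prefix and the `is_finished` flag), and reports whether it finished so the peer knows to request the next batch. Here is that budgeting loop in isolation (the 8 MiB cap and the plain `usize` sizes are assumptions made for this sketch, not the crate's constants):

```rust
// Assumed cap for the sketch; the real constant lives in proof.rs.
const MAX_PROOF_SIZE: usize = 8 * 1024 * 1024;
// Headroom for the container's own encoding (length prefix, flags).
const ENVELOPE_RESERVE: usize = 50;

/// Collect items until the running encoded size would exceed the budget.
/// Returns the kept items and whether everything fit.
fn collect_within_budget(sizes: &[usize]) -> (Vec<usize>, bool) {
    let mut kept = Vec::new();
    let mut encoded_len = 0;
    for &size in sizes {
        if encoded_len + size >= MAX_PROOF_SIZE - ENVELOPE_RESERVE {
            // Limit reached: the response is truncated and the peer
            // must ask again starting from where this batch ended.
            return (kept, false)
        }
        encoded_len += size;
        kept.push(size);
    }
    (kept, true)
}

fn main() {
    let (kept, finished) = collect_within_budget(&[1024; 10]);
    assert_eq!(kept.len(), 10);
    assert!(finished);
}
```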
@@ -115,13 +111,7 @@ impl E: Executor01 + Send>> + Send + Sync + 'static, { let manager = SubscriptionManager::new(Arc::new(executor)); - Self { - authority_set, - voter_state, - justification_stream, - manager, - finality_proof_provider, - } + Self { authority_set, voter_state, justification_stream, manager, finality_proof_provider } } } @@ -145,10 +135,12 @@ where fn subscribe_justifications( &self, _metadata: Self::Metadata, - subscriber: Subscriber + subscriber: Subscriber, ) { - let stream = self.justification_stream.subscribe() - .map(|x| Ok::<_,()>(JustificationNotification::from(x))) + let stream = self + .justification_stream + .subscribe() + .map(|x| Ok::<_, ()>(JustificationNotification::from(x))) .map_err(|e| warn!("Notification stream error: {:?}", e)) .compat(); @@ -163,7 +155,7 @@ where fn unsubscribe_justifications( &self, _metadata: Option, - id: SubscriptionId + id: SubscriptionId, ) -> jsonrpc_core::Result { Ok(self.manager.cancel(id)) } @@ -181,7 +173,7 @@ where error::Error::ProveFinalityFailed(e) }) .map_err(jsonrpc_core::Error::from) - .compat() + .compat(), ) } } @@ -189,14 +181,13 @@ where #[cfg(test)] mod tests { use super::*; + use jsonrpc_core::{types::Params, Notification, Output}; use std::{collections::HashSet, convert::TryInto, sync::Arc}; - use jsonrpc_core::{Notification, Output, types::Params}; - use parity_scale_codec::{Encode, Decode}; + use parity_scale_codec::{Decode, Encode}; use sc_block_builder::{BlockBuilder, RecordProof}; use sc_finality_grandpa::{ - report, AuthorityId, GrandpaJustificationSender, GrandpaJustification, - FinalityProof, + report, AuthorityId, FinalityProof, GrandpaJustification, GrandpaJustificationSender, }; use sp_blockchain::HeaderBackend; use sp_core::crypto::Public; @@ -204,9 +195,7 @@ mod tests { use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use substrate_test_runtime_client::{ runtime::{Block, Header, H256}, - DefaultTestClientBuilderExt, - TestClientBuilderExt, - TestClientBuilder, + DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; struct TestAuthoritySet; @@ -253,14 +242,14 @@ mod tests { impl RpcFinalityProofProvider for TestFinalityProofProvider { fn rpc_prove_finality( &self, - _block: NumberFor + _block: NumberFor, ) -> Result, sc_finality_grandpa::FinalityProofError> { Ok(Some(EncodedFinalityProof( self.finality_proof .as_ref() .expect("Don't call rpc_prove_finality without setting the FinalityProof") .encode() - .into() + .into(), ))) } } @@ -290,17 +279,14 @@ mod tests { let background_rounds = vec![(1, past_round_state)].into_iter().collect(); - Some(report::VoterState { - background_rounds, - best_round: (2, best_round_state), - }) + Some(report::VoterState { background_rounds, best_round: (2, best_round_state) }) } } - fn setup_io_handler(voter_state: VoterState) -> ( - jsonrpc_core::MetaIoHandler, - GrandpaJustificationSender, - ) where + fn setup_io_handler( + voter_state: VoterState, + ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + where VoterState: ReportVoterState + Send + Sync + 'static, { setup_io_handler_with_finality_proofs(voter_state, None) @@ -309,10 +295,8 @@ mod tests { fn setup_io_handler_with_finality_proofs( voter_state: VoterState, finality_proof: Option>, - ) -> ( - jsonrpc_core::MetaIoHandler, - GrandpaJustificationSender, - ) where + ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + where VoterState: ReportVoterState + Send + Sync + 'static, { let (justification_sender, justification_stream) = 
GrandpaJustificationStream::channel(); @@ -345,7 +329,7 @@ mod tests { #[test] fn working_rpc_handler() { - let (io, _) = setup_io_handler(TestVoterState); + let (io, _) = setup_io_handler(TestVoterState); let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; let response = "{\"jsonrpc\":\"2.0\",\"result\":{\ @@ -378,7 +362,8 @@ mod tests { let (meta, _) = setup_session(); // Subscribe - let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let sub_request = + r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; let resp = io.handle_request_sync(sub_request, meta.clone()); let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); @@ -410,7 +395,8 @@ mod tests { let (meta, _) = setup_session(); // Subscribe - let sub_request = r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + let sub_request = + r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; let resp = io.handle_request_sync(sub_request, meta.clone()); let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); assert!(matches!(resp, Output::Success(_))); @@ -440,7 +426,10 @@ mod tests { RecordProof::No, Default::default(), &*backend, - ).unwrap().build().unwrap(); + ) + .unwrap() + .build() + .unwrap(); let block = built_block.block; let block_hash = block.hash(); @@ -501,8 +490,7 @@ mod tests { _ => panic!(), }; - let recv_sub_id: String = - serde_json::from_value(json_map["subscription"].take()).unwrap(); + let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); let recv_justification: sp_core::Bytes = serde_json::from_value(json_map["result"].take()).unwrap(); let recv_justification: GrandpaJustification = @@ -520,10 +508,8 @@ mod tests { justification: create_justification().encode(), unknown_headers: vec![header(2)], }; - let (io, _) = setup_io_handler_with_finality_proofs( - TestVoterState, - Some(finality_proof.clone()), - ); + let (io, _) = + setup_io_handler_with_finality_proofs(TestVoterState, Some(finality_proof.clone())); let request = "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[42],\"id\":1}"; diff --git a/substrate/client/finality-grandpa/rpc/src/notification.rs b/substrate/client/finality-grandpa/rpc/src/notification.rs index 4c9141be3631ae4acd359da36ecc0e6a247bd11d..68944e903e0fba0df2fde251f95dd7bdc8eded00 100644 --- a/substrate/client/finality-grandpa/rpc/src/notification.rs +++ b/substrate/client/finality-grandpa/rpc/src/notification.rs @@ -16,10 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use serde::{Serialize, Deserialize}; use parity_scale_codec::Encode; -use sp_runtime::traits::Block as BlockT; use sc_finality_grandpa::GrandpaJustification; +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::Block as BlockT; /// An encoded justification proving that the given header has been finalized #[derive(Clone, Serialize, Deserialize)] diff --git a/substrate/client/finality-grandpa/rpc/src/report.rs b/substrate/client/finality-grandpa/rpc/src/report.rs index 0482d90f58f0a0bf67001d570c77686440a9f969..fef8f22659953c6d6700f34e418f73144a2c3b59 100644 --- a/substrate/client/finality-grandpa/rpc/src/report.rs +++ b/substrate/client/finality-grandpa/rpc/src/report.rs @@ -44,11 +44,8 @@ where H: Clone + Debug + Eq, { fn get(&self) -> (u64, HashSet) { - let current_voters: HashSet = self - .current_authorities() - .iter() - .map(|p| p.0.clone()) - .collect(); + let current_voters: HashSet = + self.current_authorities().iter().map(|p| p.0.clone()).collect(); (self.set_id(), current_voters) } @@ -152,10 +149,6 @@ impl ReportedRoundStates { .map(|(round, round_state)| RoundState::from(*round, round_state, &current_voters)) .collect::, Error>>()?; - Ok(Self { - set_id, - best, - background, - }) + Ok(Self { set_id, best, background }) } } diff --git a/substrate/client/finality-grandpa/src/authorities.rs b/substrate/client/finality-grandpa/src/authorities.rs index a04be72f9d31ebb5eef1741784e3e34a960c48e8..60a347acc35bc10052acd022367e3b2c47f337ad 100644 --- a/substrate/client/finality-grandpa/src/authorities.rs +++ b/substrate/client/finality-grandpa/src/authorities.rs @@ -18,18 +18,16 @@ //! Utilities for dealing with authorities, authority sets, and handoffs. -use std::cmp::Ord; -use std::fmt::Debug; -use std::ops::Add; +use std::{cmp::Ord, fmt::Debug, ops::Add}; -use fork_tree::ForkTree; -use parking_lot::MappedMutexGuard; use finality_grandpa::voter_set::VoterSet; -use parity_scale_codec::{Encode, Decode}; +use fork_tree::ForkTree; use log::debug; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::MappedMutexGuard; +use sc_consensus::shared_data::{SharedData, SharedDataLocked}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; -use sc_consensus::shared_data::{SharedData, SharedDataLocked}; use crate::SetId; @@ -77,9 +75,7 @@ pub struct SharedAuthoritySet { impl Clone for SharedAuthoritySet { fn clone(&self) -> Self { - SharedAuthoritySet { - inner: self.inner.clone(), - } + SharedAuthoritySet { inner: self.inner.clone() } } } @@ -92,16 +88,15 @@ impl SharedAuthoritySet { /// Returns access to the [`AuthoritySet`] and locks it. /// /// For more information see [`SharedDataLocked`]. - pub(crate) fn inner_locked( - &self, - ) -> SharedDataLocked> { + pub(crate) fn inner_locked(&self) -> SharedDataLocked> { self.inner.shared_data_locked() } } impl SharedAuthoritySet -where N: Add + Ord + Clone + Debug, - H: Clone + Debug +where + N: Add + Ord + Clone + Debug, + H: Clone + Debug, { /// Get the earliest limit-block number that's higher or equal to the given /// min number, if any. @@ -136,9 +131,7 @@ where N: Add + Ord + Clone + Debug, impl From> for SharedAuthoritySet { fn from(set: AuthoritySet) -> Self { - SharedAuthoritySet { - inner: SharedData::new(set), - } + SharedAuthoritySet { inner: SharedData::new(set) } } } @@ -191,7 +184,7 @@ where /// Get a genesis set with given authorities. 
pub(crate) fn genesis(initial: AuthorityList) -> Option { if Self::invalid_authority_list(&initial) { - return None; + return None } Some(AuthoritySet { @@ -212,7 +205,7 @@ where authority_set_changes: AuthoritySetChanges, ) -> Option { if Self::invalid_authority_list(&authorities) { - return None; + return None } Some(AuthoritySet { @@ -255,7 +248,7 @@ where for change in &self.pending_forced_changes { if is_descendent_of(&change.canon_hash, best_hash)? { forced = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } @@ -263,16 +256,13 @@ where for (_, _, change) in self.pending_standard_changes.roots() { if is_descendent_of(&change.canon_hash, best_hash)? { standard = Some((change.canon_hash.clone(), change.canon_height.clone())); - break; + break } } let earliest = match (forced, standard) { - (Some(forced), Some(standard)) => Some(if forced.1 < standard.1 { - forced - } else { - standard - }), + (Some(forced), Some(standard)) => + Some(if forced.1 < standard.1 { forced } else { standard }), (Some(forced), None) => Some(forced), (None, Some(standard)) => Some(standard), (None, None) => None, @@ -300,12 +290,7 @@ where pending.delay, ); - self.pending_standard_changes.import( - hash, - number, - pending, - is_descendent_of, - )?; + self.pending_standard_changes.import(hash, number, pending, is_descendent_of)?; debug!( target: "afg", @@ -329,21 +314,21 @@ where { for change in &self.pending_forced_changes { if change.canon_hash == pending.canon_hash { - return Err(Error::DuplicateAuthoritySetChange); + return Err(Error::DuplicateAuthoritySetChange) } if is_descendent_of(&change.canon_hash, &pending.canon_hash)? { - return Err(Error::MultiplePendingForcedAuthoritySetChanges); + return Err(Error::MultiplePendingForcedAuthoritySetChanges) } } // ordered first by effective number and then by signal-block number. let key = (pending.effective_number(), pending.canon_height.clone()); - let idx = self.pending_forced_changes - .binary_search_by_key(&key, |change| ( - change.effective_number(), - change.canon_height.clone(), - )) + let idx = self + .pending_forced_changes + .binary_search_by_key(&key, |change| { + (change.effective_number(), change.canon_height.clone()) + }) .unwrap_or_else(|i| i); debug!( @@ -376,24 +361,22 @@ where E: std::error::Error, { if Self::invalid_authority_list(&pending.next_authorities) { - return Err(Error::InvalidAuthoritySet); + return Err(Error::InvalidAuthoritySet) } match pending.delay_kind { - DelayKind::Best { .. } => { - self.add_forced_change(pending, is_descendent_of) - }, - DelayKind::Finalized => { - self.add_standard_change(pending, is_descendent_of) - }, + DelayKind::Best { .. } => self.add_forced_change(pending, is_descendent_of), + DelayKind::Finalized => self.add_standard_change(pending, is_descendent_of), } } /// Inspect pending changes. Standard pending changes are iterated first, /// and the changes in the tree are traversed in pre-order, afterwards all /// forced changes are iterated. - pub(crate) fn pending_changes(&self) -> impl Iterator> { - self.pending_standard_changes.iter().map(|(_, _, c)| c) + pub(crate) fn pending_changes(&self) -> impl Iterator> { + self.pending_standard_changes + .iter() + .map(|(_, _, c)| c) .chain(self.pending_forced_changes.iter()) } @@ -404,7 +387,8 @@ where /// Only standard changes are taken into account for the current /// limit, since any existing forced change should preclude the voter from voting. 
pub(crate) fn current_limit(&self, min: N) -> Option { - self.pending_standard_changes.roots() + self.pending_standard_changes + .roots() .filter(|&(_, _, c)| c.effective_number() >= min) .min_by_key(|&(_, _, c)| c.effective_number()) .map(|(_, _, c)| c.effective_number()) @@ -450,9 +434,7 @@ where // the block that signaled the change. if change.canon_hash == best_hash || is_descendent_of(&change.canon_hash, &best_hash)? { let median_last_finalized = match change.delay_kind { - DelayKind::Best { - ref median_last_finalized, - } => median_last_finalized.clone(), + DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), _ => unreachable!( "pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed." ), @@ -460,8 +442,8 @@ where // check if there's any pending standard change that we depend on for (_, _, standard_change) in self.pending_standard_changes.roots() { - if standard_change.effective_number() <= median_last_finalized - && is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? + if standard_change.effective_number() <= median_last_finalized && + is_descendent_of(&standard_change.canon_hash, &change.canon_hash)? { log::info!(target: "afg", "Not applying authority set change forced at block #{:?}, due to pending standard change at block #{:?}", @@ -469,11 +451,9 @@ where standard_change.effective_number(), ); - return Err( - Error::ForcedAuthoritySetChangeDependencyUnsatisfied( - standard_change.effective_number() - ) - ); + return Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied( + standard_change.effective_number(), + )) } } @@ -505,7 +485,7 @@ where }, )); - break; + break } } @@ -536,24 +516,19 @@ where F: Fn(&H, &H) -> Result, E: std::error::Error, { - let mut status = Status { - changed: false, - new_set_block: None, - }; + let mut status = Status { changed: false, new_set_block: None }; match self.pending_standard_changes.finalize_with_descendent_if( &finalized_hash, finalized_number.clone(), is_descendent_of, - |change| change.effective_number() <= finalized_number + |change| change.effective_number() <= finalized_number, )? { fork_tree::FinalizationResult::Changed(change) => { status.changed = true; - let pending_forced_changes = std::mem::replace( - &mut self.pending_forced_changes, - Vec::new(), - ); + let pending_forced_changes = + std::mem::replace(&mut self.pending_forced_changes, Vec::new()); // we will keep all forced changes for any later blocks and that are a // descendent of the finalized block (i.e. they are part of this branch). 
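A note on the `binary_search_by_key(...).unwrap_or_else(|i| i)` idiom in `add_forced_change` above: `binary_search_by_key` returns `Err(i)` carrying the insertion point when the key is absent, so collapsing both arms to an index yields a valid sorted-insert position in either case. A minimal standalone sketch of the idiom (illustrative only, not Substrate code):

fn main() {
	// (effective_number, canon_height) pairs kept sorted; stand-ins for the
	// ordering key used on `PendingChange` above.
	let mut pending: Vec<(u64, u64)> = vec![(5, 3), (10, 7)];
	let key = (8, 2);
	let idx = pending
		.binary_search_by_key(&key, |&(eff, height)| (eff, height))
		.unwrap_or_else(|i| i);
	pending.insert(idx, key);
	assert_eq!(pending, vec![(5, 3), (8, 2), (10, 7)]);
}
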
@@ -566,7 +541,8 @@ where } if let Some(change) = change { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Applying authority set change scheduled at block #{:?}", change.canon_height, ); @@ -583,10 +559,7 @@ where self.current_authorities = change.next_authorities; self.set_id += 1; - status.new_set_block = Some(( - finalized_hash, - finalized_number, - )); + status.new_set_block = Some((finalized_hash, finalized_number)); } }, fork_tree::FinalizationResult::Unchanged => {}, @@ -615,12 +588,14 @@ where F: Fn(&H, &H) -> Result, E: std::error::Error, { - self.pending_standard_changes.finalizes_any_with_descendent_if( - &finalized_hash, - finalized_number.clone(), - is_descendent_of, - |change| change.effective_number() == finalized_number - ).map_err(Error::ForkTree) + self.pending_standard_changes + .finalizes_any_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() == finalized_number, + ) + .map_err(Error::ForkTree) } } @@ -654,7 +629,9 @@ pub struct PendingChange { } impl Decode for PendingChange { - fn decode(value: &mut I) -> Result { + fn decode( + value: &mut I, + ) -> Result { let next_authorities = Decode::decode(value)?; let delay = Decode::decode(value)?; let canon_height = Decode::decode(value)?; @@ -662,17 +639,11 @@ impl Decode for PendingChange { let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); - Ok(PendingChange { - next_authorities, - delay, - canon_height, - canon_hash, - delay_kind, - }) + Ok(PendingChange { next_authorities, delay, canon_height, canon_hash, delay_kind }) } } -impl + Clone> PendingChange { +impl + Clone> PendingChange { /// Returns the effective number this change will be applied at. pub fn effective_number(&self) -> N { self.canon_height.clone() + self.delay.clone() @@ -715,15 +686,17 @@ impl AuthoritySetChanges { } pub(crate) fn get_set_id(&self, block_number: N) -> AuthoritySetChangeId { - if self.0 + if self + .0 .last() .map(|last_auth_change| last_auth_change.1 < block_number) .unwrap_or(false) { - return AuthoritySetChangeId::Latest; + return AuthoritySetChangeId::Latest } - let idx = self.0 + let idx = self + .0 .binary_search_by_key(&block_number, |(_, n)| n.clone()) .unwrap_or_else(|b| b); @@ -732,7 +705,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. if idx == 0 && set_id != 0 { - return AuthoritySetChangeId::Unknown; + return AuthoritySetChangeId::Unknown } AuthoritySetChangeId::Set(set_id, block_number) @@ -745,7 +718,9 @@ impl AuthoritySetChanges { /// number (excluded). The iterator yields a tuple representing the set id and the block number /// of the last block in that set. pub fn iter_from(&self, block_number: N) -> Option> { - let idx = self.0.binary_search_by_key(&block_number, |(_, n)| n.clone()) + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) // if there was a change at the given block number then we should start on the next // index since we want to exclude the current block number .map(|n| n + 1) @@ -756,7 +731,7 @@ impl AuthoritySetChanges { // if this is the first index but not the first set id then we are missing data. 
if idx == 0 && set_id != 0 { - return None; + return None } } @@ -769,14 +744,13 @@ mod tests { use super::*; use sp_core::crypto::Public; - fn static_is_descendent_of(value: bool) - -> impl Fn(&A, &A) -> Result - { + fn static_is_descendent_of(value: bool) -> impl Fn(&A, &A) -> Result { move |_, _| Ok(value) } fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result - where F: Fn(&A, &A) -> bool + where + F: Fn(&A, &A) -> bool, { move |base, hash| Ok(f(base, hash)) } @@ -793,14 +767,12 @@ mod tests { authority_set_changes: AuthoritySetChanges::empty(), }; - let change = |height| { - PendingChange { - next_authorities: current_authorities.clone(), - delay: 0, - canon_height: height, - canon_hash: height.to_string(), - delay_kind: DelayKind::Finalized, - } + let change = |height| PendingChange { + next_authorities: current_authorities.clone(), + delay: 0, + canon_height: height, + canon_hash: height.to_string(), + delay_kind: DelayKind::Finalized, }; let is_descendent_of = static_is_descendent_of(false); @@ -808,25 +780,13 @@ mod tests { authorities.add_pending_change(change(1), &is_descendent_of).unwrap(); authorities.add_pending_change(change(2), &is_descendent_of).unwrap(); - assert_eq!( - authorities.current_limit(0), - Some(1), - ); + assert_eq!(authorities.current_limit(0), Some(1),); - assert_eq!( - authorities.current_limit(1), - Some(1), - ); + assert_eq!(authorities.current_limit(1), Some(1),); - assert_eq!( - authorities.current_limit(2), - Some(2), - ); + assert_eq!(authorities.current_limit(2), Some(2),); - assert_eq!( - authorities.current_limit(3), - None, - ); + assert_eq!(authorities.current_limit(3), None,); } #[test] @@ -865,13 +825,22 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_c.clone(), &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change( + change_c.clone(), + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); // forced changes are iterated last let change_d = PendingChange { @@ -890,8 +859,12 @@ mod tests { delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; - authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); + authorities + .add_pending_change(change_d.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_e.clone(), &static_is_descendent_of(false)) + .unwrap(); // ordered by subtree depth assert_eq!( @@ -930,46 +903,48 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), 
&static_is_descendent_of(true)) + .unwrap(); - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_a, &change_b], - ); + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a, &change_b],); // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" - let status = authorities.apply_standard_changes( - "hash_c", - 11, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - }), - false, - None, - ).unwrap(); + let status = authorities + .apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, None); - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_a], - ); + assert_eq!(authorities.pending_changes().collect::>(), vec![&change_a],); assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); // finalizing "hash_d" will enact the change signaled at "hash_a" - let status = authorities.apply_standard_changes( - "hash_d", - 15, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - _ => unreachable!(), - }), - false, - None, - ).unwrap(); + let status = authorities + .apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + false, + None, + ) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 15))); @@ -1010,8 +985,12 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_c.clone(), &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c.clone(), &static_is_descendent_of(true)) + .unwrap(); let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_b") => true, @@ -1032,13 +1011,9 @@ mod tests { )); assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); - let status = authorities.apply_standard_changes( - "hash_b", - 15, - &is_descendent_of, - false, - None, - ).unwrap(); + let status = authorities + .apply_standard_changes("hash_b", 15, &is_descendent_of, false, None) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_b", 15))); @@ -1048,13 +1023,9 @@ mod tests { assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_standard_changes( - "hash_d", - 40, - &is_descendent_of, - false, - None, - ).unwrap(); + let status = authorities + .apply_standard_changes("hash_d", 40, &is_descendent_of, false, None) + .unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 40))); @@ -1092,8 +1063,12 @@ mod tests { delay_kind: DelayKind::Finalized, }; - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a.clone(), 
&static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { ("hash_a", "hash_d") => true, @@ -1160,8 +1135,12 @@ mod tests { delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; - authorities.add_pending_change(change_a, &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(false)).unwrap(); + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); // no duplicates are allowed assert!(matches!( @@ -1172,7 +1151,9 @@ mod tests { // there's an effective change triggered at block 15 but not a standard one. // so this should do nothing. assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)).unwrap(), + authorities + .enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)) + .unwrap(), None, ); @@ -1194,20 +1175,16 @@ mod tests { // let's try and apply the forced changes. // too early and there's no forced changes to apply. - assert!( - authorities - .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false, None) - .unwrap() - .is_none() - ); + assert!(authorities + .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false, None) + .unwrap() + .is_none()); // too late. - assert!( - authorities - .apply_forced_changes("hash_a16", 16, &is_descendent_of_a, false, None) - .unwrap() - .is_none() - ); + assert!(authorities + .apply_forced_changes("hash_a16", 16, &is_descendent_of_a, false, None) + .unwrap() + .is_none()); // on time -- chooses the right change for this fork. 
assert_eq!( @@ -1247,9 +1224,7 @@ mod tests { delay: 0, canon_height: 5, canon_hash: "hash_a", - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; // and import it @@ -1258,12 +1233,10 @@ mod tests { .unwrap(); // it should be enacted at the same block that signaled it - assert!( - authorities - .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false, None) - .unwrap() - .is_some() - ); + assert!(authorities + .apply_forced_changes("hash_a", 5, &static_is_descendent_of(false), false, None) + .unwrap() + .is_some()); } #[test] @@ -1306,9 +1279,15 @@ mod tests { }; // add some pending standard changes all on the same fork - authorities.add_pending_change(change_a, &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_b, &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_c, &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_a, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b, &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c, &static_is_descendent_of(true)) + .unwrap(); // effective at #45 let change_d = PendingChange { @@ -1316,18 +1295,24 @@ mod tests { delay: 5, canon_height: 40, canon_hash: "hash_d", - delay_kind: DelayKind::Best { - median_last_finalized: 31, - }, + delay_kind: DelayKind::Best { median_last_finalized: 31 }, }; // now add a forced change on the same fork - authorities.add_pending_change(change_d, &static_is_descendent_of(true)).unwrap(); + authorities + .add_pending_change(change_d, &static_is_descendent_of(true)) + .unwrap(); // the forced change cannot be applied since the pending changes it depends on // have not been applied yet. 
assert!(matches!( - authorities.apply_forced_changes("hash_d45", 45, &static_is_descendent_of(true), false, None), + authorities.apply_forced_changes( + "hash_d45", + 45, + &static_is_descendent_of(true), + false, + None + ), Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(15)) )); assert_eq!(authorities.authority_set_changes, AuthoritySetChanges::empty()); @@ -1340,7 +1325,13 @@ mod tests { // but the forced change still depends on the next standard change assert!(matches!( - authorities.apply_forced_changes("hash_d", 45, &static_is_descendent_of(true), false, None), + authorities.apply_forced_changes( + "hash_d", + 45, + &static_is_descendent_of(true), + false, + None + ), Err(Error::ForcedAuthoritySetChangeDependencyUnsatisfied(20)) )); assert_eq!(authorities.authority_set_changes, AuthoritySetChanges(vec![(0, 15)])); @@ -1425,29 +1416,19 @@ mod tests { }); // add the three pending changes - authorities - .add_pending_change(change_b, &is_descendent_of) - .unwrap(); - authorities - .add_pending_change(change_a0, &is_descendent_of) - .unwrap(); - authorities - .add_pending_change(change_a1, &is_descendent_of) - .unwrap(); + authorities.add_pending_change(change_b, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a0, &is_descendent_of).unwrap(); + authorities.add_pending_change(change_a1, &is_descendent_of).unwrap(); // the earliest change at block `best_a` should be the change at A0 (#5) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a0", 5)), ); // the earliest change at block `best_b` should be the change at B (#4) assert_eq!( - authorities - .next_change(&"best_b", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_b", &is_descendent_of).unwrap(), Some(("hash_b", 4)), ); @@ -1458,19 +1439,12 @@ mod tests { // the next change is now at A1 (#10) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a1", 10)), ); // there's no longer any pending change at `best_b` fork - assert_eq!( - authorities - .next_change(&"best_b", &is_descendent_of) - .unwrap(), - None, - ); + assert_eq!(authorities.next_change(&"best_b", &is_descendent_of).unwrap(), None,); // we a forced change at A10 (#8) let change_a10 = PendingChange { @@ -1478,9 +1452,7 @@ mod tests { delay: 0, canon_height: 8, canon_hash: "hash_a10", - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; authorities @@ -1489,9 +1461,7 @@ mod tests { // it should take precedence over the change at A1 (#10) assert_eq!( - authorities - .next_change(&"best_a", &is_descendent_of) - .unwrap(), + authorities.next_change(&"best_a", &is_descendent_of).unwrap(), Some(("hash_a10", 8)), ); } @@ -1511,16 +1481,11 @@ mod tests { None, ); - let invalid_authorities_weight = vec![ - (AuthorityId::from_slice(&[1; 32]), 5), - (AuthorityId::from_slice(&[2; 32]), 0), - ]; + let invalid_authorities_weight = + vec![(AuthorityId::from_slice(&[1; 32]), 5), (AuthorityId::from_slice(&[2; 32]), 0)]; // authority weight of zero is invalid - assert_eq!( - AuthoritySet::<(), ()>::genesis(invalid_authorities_weight.clone()), - None - ); + assert_eq!(AuthoritySet::<(), ()>::genesis(invalid_authorities_weight.clone()), None); assert_eq!( AuthoritySet::<(), ()>::new( invalid_authorities_weight.clone(), @@ -1557,9 +1522,7 @@ mod tests { 
delay: 10, canon_height: 5, canon_hash: (), - delay_kind: DelayKind::Best { - median_last_finalized: 0, - }, + delay_kind: DelayKind::Best { median_last_finalized: 0 }, }; // pending change contains an an authority set @@ -1617,17 +1580,13 @@ mod tests { canon_height, canon_hash, delay_kind: if forced { - DelayKind::Best { - median_last_finalized: 0, - } + DelayKind::Best { median_last_finalized: 0 } } else { DelayKind::Finalized }, }; - authorities - .add_pending_change(change, &is_descendent_of) - .unwrap(); + authorities.add_pending_change(change, &is_descendent_of).unwrap(); }; add_pending_change(5, "A", false); @@ -1669,14 +1628,7 @@ mod tests { .unwrap(); assert_eq!(authorities.pending_forced_changes.len(), 1); - assert_eq!( - authorities - .pending_forced_changes - .first() - .unwrap() - .canon_hash, - "D" - ); + assert_eq!(authorities.pending_forced_changes.first().unwrap().canon_hash, "D"); } #[test] @@ -1714,10 +1666,7 @@ mod tests { authority_set_changes.append(2, 81); // we are missing the data for the first set, therefore we should return `None` - assert_eq!( - None, - authority_set_changes.iter_from(40).map(|it| it.collect::>()), - ); + assert_eq!(None, authority_set_changes.iter_from(40).map(|it| it.collect::>()),); // after adding the data for the first set the same query should work let mut authority_set_changes = AuthoritySetChanges::empty(); @@ -1736,14 +1685,8 @@ mod tests { authority_set_changes.iter_from(41).map(|it| it.cloned().collect::>()), ); - assert_eq!( - 0, - authority_set_changes.iter_from(121).unwrap().count(), - ); + assert_eq!(0, authority_set_changes.iter_from(121).unwrap().count(),); - assert_eq!( - 0, - authority_set_changes.iter_from(200).unwrap().count(), - ); + assert_eq!(0, authority_set_changes.iter_from(200).unwrap().count(),); } } diff --git a/substrate/client/finality-grandpa/src/aux_schema.rs b/substrate/client/finality-grandpa/src/aux_schema.rs index 296f7c13c5244c0cf160a713df8e8d6854ce4da1..179e8876e66d844e00c095e4b5a0b61c326478b6 100644 --- a/substrate/client/finality-grandpa/src/aux_schema.rs +++ b/substrate/client/finality-grandpa/src/aux_schema.rs @@ -30,13 +30,16 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_finality_grandpa::{AuthorityList, RoundNumber, SetId}; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::authorities::{ - AuthoritySet, AuthoritySetChanges, DelayKind, PendingChange, SharedAuthoritySet, +use crate::{ + authorities::{ + AuthoritySet, AuthoritySetChanges, DelayKind, PendingChange, SharedAuthoritySet, + }, + environment::{ + CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, + VoterSetState, + }, + GrandpaJustification, NewAuthoritySet, }; -use crate::environment::{ - CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, -}; -use crate::{GrandpaJustification, NewAuthoritySet}; const VERSION_KEY: &[u8] = b"grandpa_schema_version"; const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; @@ -141,13 +144,13 @@ struct V2AuthoritySet { pub(crate) fn load_decode( backend: &B, - key: &[u8] + key: &[u8], ) -> ClientResult> { match backend.get_aux(key)? 
{ None => Ok(None), Some(t) => T::decode(&mut &t[..]) .map_err(|e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e))) - .map(Some) + .map(Some), } } @@ -160,24 +163,16 @@ pub(crate) struct PersistentData { fn migrate_from_version0( backend: &B, genesis_round: &G, -) -> ClientResult< - Option<( - AuthoritySet>, - VoterSetState, - )>, -> +) -> ClientResult>, VoterSetState)>> where B: AuxStore, G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; - if let Some(old_set) = load_decode::<_, V0AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { + if let Some(old_set) = + load_decode::<_, V0AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { let new_set: AuthoritySet> = old_set.into(); backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; @@ -193,7 +188,7 @@ where let set_id = new_set.set_id; let base = last_round_state.prevote_ghost.expect( - "state is for completed round; completed rounds must have a prevote ghost; qed." + "state is for completed round; completed rounds must have a prevote ghost; qed.", ); let mut current_rounds = CurrentRounds::new(); @@ -215,7 +210,7 @@ where backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((new_set, set_state))); + return Ok(Some((new_set, set_state))) } Ok(None) @@ -224,36 +219,25 @@ where fn migrate_from_version1( backend: &B, genesis_round: &G, -) -> ClientResult< - Option<( - AuthoritySet>, - VoterSetState, - )>, -> +) -> ClientResult>, VoterSetState)>> where B: AuxStore, G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; - if let Some(set) = load_decode::<_, AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { + if let Some(set) = + load_decode::<_, AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { let set_id = set.set_id; - let completed_rounds = |number, state, base| CompletedRounds::new( - CompletedRound { - number, - state, - votes: Vec::new(), - base, - }, - set_id, - &set, - ); + let completed_rounds = |number, state, base| { + CompletedRounds::new( + CompletedRound { number, state, votes: Vec::new(), base }, + set_id, + &set, + ) + }; let set_state = match load_decode::<_, V1VoterSetState>>( backend, @@ -284,17 +268,13 @@ where let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set_id, - &set, - base, - ) + VoterSetState::live(set_id, &set, base) }, }; backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - return Ok(Some((set, set_state))); + return Ok(Some((set, set_state))) } Ok(None) @@ -303,46 +283,31 @@ where fn migrate_from_version2( backend: &B, genesis_round: &G, -) -> ClientResult< - Option<( - AuthoritySet>, - VoterSetState, - )>, -> +) -> ClientResult>, VoterSetState)>> where B: AuxStore, G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; - if let Some(old_set) = load_decode::<_, V2AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { + if let Some(old_set) = + load_decode::<_, V2AuthoritySet>>(backend, AUTHORITY_SET_KEY)? 
+ { let new_set: AuthoritySet> = old_set.into(); backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; - let set_state = match load_decode::<_, VoterSetState>( - backend, - SET_STATE_KEY, - )? { + let set_state = match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { Some(state) => state, None => { let state = genesis_round(); let base = state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - new_set.set_id, - &new_set, - base, - ) - } + VoterSetState::live(new_set.set_id, &new_set, base) + }, }; - return Ok(Some((new_set, set_state))); + return Ok(Some((new_set, set_state))) } Ok(None) @@ -371,7 +336,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }); + }) } }, Some(1) => { @@ -381,7 +346,7 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }); + }) } }, Some(2) => { @@ -391,41 +356,31 @@ where return Ok(PersistentData { authority_set: new_set.into(), set_state: set_state.into(), - }); + }) } - } + }, Some(3) => { if let Some(set) = load_decode::<_, AuthoritySet>>( backend, AUTHORITY_SET_KEY, )? { - let set_state = match load_decode::<_, VoterSetState>( - backend, - SET_STATE_KEY, - )? { - Some(state) => state, - None => { - let state = make_genesis_round(); - let base = state.prevote_ghost + let set_state = + match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => { + let state = make_genesis_round(); + let base = state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set.set_id, - &set, - base, - ) - } - }; + VoterSetState::live(set.set_id, &set, base) + }, + }; - return Ok(PersistentData { - authority_set: set.into(), - set_state: set_state.into(), - }); + return Ok(PersistentData { authority_set: set.into(), set_state: set_state.into() }) } - } - Some(other) => return Err(ClientError::Backend( - format!("Unsupported GRANDPA DB version: {:?}", other) - )), + }, + Some(other) => + return Err(ClientError::Backend(format!("Unsupported GRANDPA DB version: {:?}", other))), } // genesis. @@ -436,14 +391,11 @@ where let genesis_set = AuthoritySet::genesis(genesis_authorities) .expect("genesis authorities is non-empty; all weights are non-zero; qed."); let state = make_genesis_round(); - let base = state.prevote_ghost + let base = state + .prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - let genesis_state = VoterSetState::live( - 0, - &genesis_set, - base, - ); + let genesis_state = VoterSetState::live(0, &genesis_set, base); backend.insert_aux( &[ @@ -453,10 +405,7 @@ where &[], )?; - Ok(PersistentData { - authority_set: genesis_set.into(), - set_state: genesis_state.into(), - }) + Ok(PersistentData { authority_set: genesis_set.into(), set_state: genesis_state.into() }) } /// Update the authority set on disk after a change. 
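The `aux_schema` hunks above preserve the version-dispatch structure of `load_persistent`: read the schema version stored under `VERSION_KEY`, run the matching `migrate_from_version{0,1,2}` step, fall back to genesis when nothing is stored, and reject anything newer. A simplified standalone sketch of that dispatch, with hypothetical types standing in for the real backend and voter state:

const CURRENT_VERSION: u32 = 3;

fn load_with_migrations(stored_version: Option<u32>) -> Result<&'static str, String> {
	match stored_version {
		// unversioned (v0) data is migrated and rewritten under the new layout
		None | Some(0) => Ok("migrated from v0"),
		Some(1) => Ok("migrated from v1"),
		Some(2) => Ok("migrated from v2"),
		Some(CURRENT_VERSION) => Ok("loaded current layout"),
		Some(other) => Err(format!("Unsupported GRANDPA DB version: {:?}", other)),
	}
}

fn main() {
	assert_eq!(load_with_migrations(Some(2)), Ok("migrated from v2"));
	assert!(load_with_migrations(Some(9)).is_err());
}
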
@@ -486,10 +435,7 @@ where ); let encoded = set_state.encode(); - write_aux(&[ - (AUTHORITY_SET_KEY, &encoded_set[..]), - (SET_STATE_KEY, &encoded[..]), - ]) + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..]), (SET_STATE_KEY, &encoded[..])]) } else { write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) } @@ -527,10 +473,7 @@ pub(crate) fn write_voter_set_state( backend: &B, state: &VoterSetState, ) -> ClientResult<()> { - backend.insert_aux( - &[(SET_STATE_KEY, state.encode().as_slice())], - &[] - ) + backend.insert_aux(&[(SET_STATE_KEY, state.encode().as_slice())], &[]) } /// Write concluded round. @@ -554,10 +497,10 @@ pub(crate) fn load_authorities( #[cfg(test)] mod test { - use sp_finality_grandpa::AuthorityId; + use super::*; use sp_core::H256; + use sp_finality_grandpa::AuthorityId; use substrate_test_runtime_client; - use super::*; #[test] fn load_decode_from_v0_migrates_data_format() { @@ -582,19 +525,18 @@ mod test { let voter_set_state = (round_number, round_state.clone()); - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - ], - &[], - ).unwrap(); + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + ], + &[], + ) + .unwrap(); } - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - None, - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), None,); // should perform the migration load_persistent::( @@ -602,23 +544,19 @@ mod test { H256::random(), 0, || unreachable!(), - ).unwrap(); + ) + .unwrap(); - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(3), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3),); - let PersistentData { - authority_set, - set_state, - .. - } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); + let PersistentData { authority_set, set_state, .. } = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); assert_eq!( *authority_set.inner(), @@ -628,7 +566,8 @@ mod test { ForkTree::new(), Vec::new(), AuthoritySetChanges::empty(), - ).unwrap(), + ) + .unwrap(), ); let mut current_rounds = CurrentRounds::new(); @@ -673,24 +612,24 @@ mod test { ForkTree::new(), Vec::new(), AuthoritySetChanges::empty(), - ).unwrap(); + ) + .unwrap(); let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone()); - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - (VERSION_KEY, 1u32.encode().as_slice()), - ], - &[], - ).unwrap(); + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 1u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); } - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(1), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(1),); // should perform the migration load_persistent::( @@ -698,23 +637,19 @@ mod test { H256::random(), 0, || unreachable!(), - ).unwrap(); + ) + .unwrap(); - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(3), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3),); - let PersistentData { - authority_set, - set_state, - .. 
- } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); + let PersistentData { authority_set, set_state, .. } = + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); assert_eq!( *authority_set.inner(), @@ -724,7 +659,8 @@ mod test { ForkTree::new(), Vec::new(), AuthoritySetChanges::empty(), - ).unwrap(), + ) + .unwrap(), ); let mut current_rounds = CurrentRounds::new(); @@ -768,23 +704,22 @@ mod test { VoterSetState::live( set_id, &authority_set.clone().into(), // Note the conversion! - genesis_state + genesis_state, ); - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - (VERSION_KEY, 2u32.encode().as_slice()), - ], - &[], - ).unwrap(); + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 2u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); } - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(2),); // should perform the migration load_persistent::( @@ -792,22 +727,17 @@ mod test { H256::random(), 0, || unreachable!(), - ).unwrap(); + ) + .unwrap(); - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(3), - ); + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), Some(3),); - let PersistentData { - authority_set, - .. - } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); + let PersistentData { authority_set, .. } = load_persistent::< + substrate_test_runtime_client::runtime::Block, + _, + _, + >(&client, H256::random(), 0, || unreachable!()) + .unwrap(); assert_eq!( *authority_set.inner(), @@ -817,7 +747,8 @@ mod test { ForkTree::new(), Vec::new(), AuthoritySetChanges::empty(), - ).unwrap(), + ) + .unwrap(), ); } @@ -843,7 +774,8 @@ mod test { assert_eq!( load_decode::<_, CompletedRound::>( &client, &key - ).unwrap(), + ) + .unwrap(), Some(completed_round), ); } diff --git a/substrate/client/finality-grandpa/src/communication/gossip.rs b/substrate/client/finality-grandpa/src/communication/gossip.rs index 1b3b5ea7c5d24b4f1a9ff3362744aa5aee1e2c11..60a9cde904d8aec937a150354fad6dd4b998b927 100644 --- a/substrate/client/finality-grandpa/src/communication/gossip.rs +++ b/substrate/client/finality-grandpa/src/communication/gossip.rs @@ -84,23 +84,25 @@ //! //! 
We only send polite messages to peers, -use sp_runtime::traits::{NumberFor, Block as BlockT, Zero}; -use sc_network_gossip::{MessageIntent, ValidatorContext}; +use parity_scale_codec::{Decode, Encode}; use sc_network::{ObservedRole, PeerId, ReputationChange}; -use parity_scale_codec::{Encode, Decode}; +use sc_network_gossip::{MessageIntent, ValidatorContext}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use log::{debug, trace}; +use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; +use rand::seq::SliceRandom; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG}; -use log::{trace, debug}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use prometheus_endpoint::{CounterVec, Opts, PrometheusError, register, Registry, U64}; -use rand::seq::SliceRandom; +use super::{benefit, cost, Round, SetId}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; -use super::{cost, benefit, Round, SetId}; -use std::collections::{HashMap, VecDeque, HashSet}; -use std::time::Duration; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::Duration, +}; use wasm_timer::Instant; const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -146,18 +148,14 @@ enum Consider { /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. last_commit: Option, // commit-finalized block height, if any. } impl Default for View { fn default() -> Self { - View { - round: Round(1), - set_id: SetId(0), - last_commit: None, - } + View { round: Round(1), set_id: SetId(0), last_commit: None } } } @@ -165,12 +163,20 @@ impl View { /// Consider a round and set ID combination under a current view. fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } // only r-1 ... r+1 - if round.0 > self.round.0.saturating_add(1) { return Consider::RejectFuture } - if round.0 < self.round.0.saturating_sub(1) { return Consider::RejectPast } + if round.0 > self.round.0.saturating_add(1) { + return Consider::RejectFuture + } + if round.0 < self.round.0.saturating_sub(1) { + return Consider::RejectPast + } Consider::Accept } @@ -179,18 +185,23 @@ impl View { /// because we gate on finalization of a further block than a previous commit. fn consider_global(&self, set_id: SetId, number: N) -> Consider { // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } + if set_id < self.set_id { + return Consider::RejectPast + } + if set_id > self.set_id { + return Consider::RejectFuture + } // only commits which claim to prove a higher block number than // the one we're aware of. match self.last_commit { None => Consider::Accept, - Some(ref num) => if num < &number { - Consider::Accept - } else { - Consider::RejectPast - } + Some(ref num) => + if num < &number { + Consider::Accept + } else { + Consider::RejectPast + }, } } } @@ -208,22 +219,13 @@ struct LocalView { impl LocalView { /// Creates a new `LocalView` at the given set id and round. 
fn new(set_id: SetId, round: Round) -> LocalView { - LocalView { - set_id, - round, - last_commit: None, - round_start: Instant::now(), - } + LocalView { set_id, round, last_commit: None, round_start: Instant::now() } } /// Converts the local view to a `View` discarding round and set id /// information about the last commit. fn as_view(&self) -> View<&N> { - View { - round: self.round, - set_id: self.set_id, - last_commit: self.last_commit_height(), - } + View { round: self.round, set_id: self.set_id, last_commit: self.last_commit_height() } } /// Update the set ID. implies a reset to round 1. @@ -231,7 +233,7 @@ impl LocalView { if set_id != self.set_id { self.set_id = set_id; self.round = Round(1); - self.round_start = Instant::now(); + self.round_start = Instant::now(); } } @@ -259,7 +261,7 @@ const KEEP_RECENT_ROUNDS: usize = 3; struct KeepTopics { current_set: SetId, rounds: VecDeque<(Round, SetId)>, - reverse_map: HashMap, SetId)> + reverse_map: HashMap, SetId)>, } impl KeepTopics { @@ -293,10 +295,7 @@ impl KeepTopics { map.insert(super::global_topic::(self.current_set.0), (None, self.current_set)); for &(round, set) in &self.rounds { - map.insert( - super::round_topic::(round.0, set.0), - (Some(round), set) - ); + map.insert(super::round_topic::(round.0, set.0), (Some(round), set)); } self.reverse_map = map; @@ -310,10 +309,8 @@ impl KeepTopics { // topics to send to a neighbor based on their view. fn neighbor_topics(view: &View>) -> Vec { let s = view.set_id; - let mut topics = vec![ - super::global_topic::(s.0), - super::round_topic::(view.round.0, s.0), - ]; + let mut topics = + vec![super::global_topic::(s.0), super::round_topic::(view.round.0, s.0)]; if view.round.0 != 0 { let r = Round(view.round.0 - 1); @@ -423,15 +420,9 @@ pub(super) enum Misbehavior { // could not decode neighbor message. bytes-length of the packet. UndecodablePacket(i32), // Bad catch up message (invalid signatures). - BadCatchUpMessage { - signatures_checked: i32, - }, + BadCatchUpMessage { signatures_checked: i32 }, // Bad commit message - BadCommitMessage { - signatures_checked: i32, - blocks_loaded: i32, - equivocations_caught: i32, - }, + BadCommitMessage { signatures_checked: i32, blocks_loaded: i32, equivocations_caught: i32 }, // A message received that's from the future relative to our view. // always misbehavior. 
FutureMessage, @@ -462,7 +453,10 @@ impl Misbehavior { let benefit = equivocations_caught.saturating_mul(benefit::PER_EQUIVOCATION); - ReputationChange::new((benefit as i32).saturating_add(cost as i32), "Grandpa: Bad commit") + ReputationChange::new( + (benefit as i32).saturating_add(cost as i32), + "Grandpa: Bad commit", + ) }, FutureMessage => cost::FUTURE_MESSAGE, OutOfScopeMessage => cost::OUT_OF_SCOPE_MESSAGE, @@ -478,10 +472,7 @@ struct PeerInfo { impl PeerInfo { fn new(roles: ObservedRole) -> Self { - PeerInfo { - view: View::default(), - roles, - } + PeerInfo { view: View::default(), roles } } } @@ -515,14 +506,14 @@ impl Peers { match role { ObservedRole::Authority if self.first_stage_peers.len() < LUCKY_PEERS => { self.first_stage_peers.insert(who.clone()); - } + }, ObservedRole::Authority if self.second_stage_peers.len() < LUCKY_PEERS => { self.second_stage_peers.insert(who.clone()); - } + }, ObservedRole::Light if self.lucky_light_peers.len() < LUCKY_PEERS => { self.lucky_light_peers.insert(who.clone()); - } - _ => {} + }, + _ => {}, } self.inner.insert(who, PeerInfo::new(role)); @@ -548,12 +539,12 @@ impl Peers { Some(p) => p, }; - let invalid_change = peer.view.set_id > update.set_id - || peer.view.round > update.round && peer.view.set_id == update.set_id - || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + let invalid_change = peer.view.set_id > update.set_id || + peer.view.round > update.round && peer.view.set_id == update.set_id || + peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); if invalid_change { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } peer.view = View { @@ -578,7 +569,7 @@ impl Peers { // same height, because there is still a misbehavior condition based on // sending commits that are <= the best we are aware of. if peer.view.last_commit.as_ref() > Some(&new_height) { - return Err(Misbehavior::InvalidViewChange); + return Err(Misbehavior::InvalidViewChange) } peer.view.last_commit = Some(new_height); @@ -630,7 +621,7 @@ impl Peers { } else if n_authorities_added < one_and_a_half_lucky { second_stage_peers.insert(peer_id.clone()); } else { - break; + break } n_authorities_added += 1; @@ -641,7 +632,7 @@ impl Peers { let n_second_stage_peers = LUCKY_PEERS.max((shuffled_peers.len() as f32).sqrt() as usize); for (peer_id, info) in &shuffled_peers { if info.roles.is_light() { - continue; + continue } if first_stage_peers.len() < LUCKY_PEERS { @@ -652,20 +643,14 @@ impl Peers { second_stage_peers.insert(peer_id.clone()); } } else { - break; + break } } // pick `LUCKY_PEERS` random light peers let lucky_light_peers = shuffled_peers .into_iter() - .filter_map(|(peer_id, info)| { - if info.roles.is_light() { - Some(peer_id) - } else { - None - } - }) + .filter_map(|(peer_id, info)| if info.roles.is_light() { Some(peer_id) } else { None }) .take(LUCKY_PEERS) .collect(); @@ -691,15 +676,9 @@ enum PendingCatchUp { /// No pending catch up requests. None, /// Pending catch up request which has not been answered yet. - Requesting { - who: PeerId, - request: CatchUpRequestMessage, - instant: Instant, - }, + Requesting { who: PeerId, request: CatchUpRequestMessage, instant: Instant }, /// Pending catch up request that was answered and is being processed. - Processing { - instant: Instant, - }, + Processing { instant: Instant }, } /// Configuration for the round catch-up mechanism. 
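To make the gossip gating in `View::consider_vote` above concrete: a vote is only accepted when it belongs to the current voter set and to rounds `r - 1` through `r + 1`, with saturating arithmetic so the window stays well-defined at round 1. A standalone sketch of the rule with simplified types:

#[derive(Debug, PartialEq)]
enum Consider {
	Accept,
	RejectPast,
	RejectFuture,
}

// Mirrors the set-id check and the r-1..=r+1 round window from the hunk above.
fn consider_vote(local_round: u64, local_set: u64, round: u64, set_id: u64) -> Consider {
	if set_id < local_set {
		return Consider::RejectPast
	}
	if set_id > local_set {
		return Consider::RejectFuture
	}
	if round > local_round.saturating_add(1) {
		return Consider::RejectFuture
	}
	if round < local_round.saturating_sub(1) {
		return Consider::RejectPast
	}
	Consider::Accept
}

fn main() {
	assert_eq!(consider_vote(5, 0, 4, 0), Consider::Accept); // r - 1 is still in the window
	assert_eq!(consider_vote(5, 0, 7, 0), Consider::RejectFuture); // r + 2 is too far ahead
	assert_eq!(consider_vote(1, 0, 0, 0), Consider::Accept); // saturating_sub avoids underflow at r = 1
}
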
@@ -730,10 +709,7 @@ impl CatchUpConfig { fn request_allowed(&self, peer: &PeerInfo) -> bool { match self { CatchUpConfig::Disabled => false, - CatchUpConfig::Enabled { - only_from_authorities, - .. - } => match peer.roles { + CatchUpConfig::Enabled { only_from_authorities, .. } => match peer.roles { ObservedRole::Authority => true, ObservedRole::Light => false, ObservedRole::Full => !only_from_authorities, @@ -795,11 +771,12 @@ impl Inner { { let local_view = match self.local_view { None => return None, - Some(ref mut v) => if v.round == round { - return None - } else { - v - }, + Some(ref mut v) => + if v.round == round { + return None + } else { + v + }, }; let set_id = local_view.set_id; @@ -820,27 +797,24 @@ impl Inner { fn note_set(&mut self, set_id: SetId, authorities: Vec) -> MaybeMessage { { let local_view = match self.local_view { - ref mut x @ None => x.get_or_insert(LocalView::new( - set_id, - Round(1), - )), - Some(ref mut v) => if v.set_id == set_id { - let diff_authorities = - self.authorities.iter().collect::>() != - authorities.iter().collect(); - - if diff_authorities { - debug!(target: "afg", - "Gossip validator noted set {:?} twice with different authorities. \ - Was the authority set hard forked?", - set_id, - ); - self.authorities = authorities; - } - return None; - } else { - v - }, + ref mut x @ None => x.get_or_insert(LocalView::new(set_id, Round(1))), + Some(ref mut v) => + if v.set_id == set_id { + let diff_authorities = self.authorities.iter().collect::>() != + authorities.iter().collect(); + + if diff_authorities { + debug!(target: "afg", + "Gossip validator noted set {:?} twice with different authorities. \ + Was the authority set hard forked?", + set_id, + ); + self.authorities = authorities; + } + return None + } else { + v + }, }; local_view.update_set(set_id); @@ -860,11 +834,12 @@ impl Inner { { match self.local_view { None => return None, - Some(ref mut v) => if v.last_commit_height() < Some(&finalized) { - v.last_commit = Some((finalized, round, set_id)); - } else { - return None - }, + Some(ref mut v) => + if v.last_commit_height() < Some(&finalized) { + v.last_commit = Some((finalized, round, set_id)); + } else { + return None + }, }; } @@ -872,30 +847,40 @@ impl Inner { } fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { - self.local_view.as_ref() + self.local_view + .as_ref() .map(LocalView::as_view) .map(|v| v.consider_vote(round, set_id)) .unwrap_or(Consider::RejectOutOfScope) } fn consider_global(&self, set_id: SetId, number: NumberFor) -> Consider { - self.local_view.as_ref() + self.local_view + .as_ref() .map(LocalView::as_view) .map(|v| v.consider_global(set_id, &number)) .unwrap_or(Consider::RejectOutOfScope) } - fn cost_past_rejection(&self, _who: &PeerId, _round: Round, _set_id: SetId) -> ReputationChange { + fn cost_past_rejection( + &self, + _who: &PeerId, + _round: Round, + _set_id: SetId, + ) -> ReputationChange { // hardcoded for now. 
cost::PAST_REJECTION } - fn validate_round_message(&self, who: &PeerId, full: &VoteMessage) - -> Action - { + fn validate_round_message( + &self, + who: &PeerId, + full: &VoteMessage, + ) -> Action { match self.consider_vote(full.round, full.set_id) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), Consider::RejectPast => return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), Consider::Accept => {}, @@ -910,7 +895,7 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::UNKNOWN_VOTER); + return Action::Discard(cost::UNKNOWN_VOTER) } if !sp_finality_grandpa::check_message_signature( @@ -927,30 +912,34 @@ impl Inner { "afg.bad_msg_signature"; "signature" => ?full.message.id, ); - return Action::Discard(cost::BAD_SIGNATURE); + return Action::Discard(cost::BAD_SIGNATURE) } let topic = super::round_topic::(full.round.0, full.set_id.0); Action::Keep(topic, benefit::ROUND_MESSAGE) } - fn validate_commit_message(&mut self, who: &PeerId, full: &FullCommitMessage) - -> Action - { - + fn validate_commit_message( + &mut self, + who: &PeerId, + full: &FullCommitMessage, + ) -> Action { if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { - return Action::Discard(misbehavior.cost()); + return Action::Discard(misbehavior.cost()) } match self.consider_global(full.set_id, full.message.target_number) { Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), Consider::RejectPast => return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + Consider::RejectOutOfScope => + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), Consider::Accept => {}, } - if full.message.precommits.len() != full.message.auth_data.len() || full.message.precommits.is_empty() { + if full.message.precommits.len() != full.message.auth_data.len() || + full.message.precommits.is_empty() + { debug!(target: "afg", "Malformed compact commit"); telemetry!( self.config.telemetry; @@ -960,7 +949,7 @@ impl Inner { "auth_data_len" => ?full.message.auth_data.len(), "precommits_is_empty" => ?full.message.precommits.is_empty(), ); - return Action::Discard(cost::MALFORMED_COMMIT); + return Action::Discard(cost::MALFORMED_COMMIT) } // always discard commits initially and rebroadcast after doing full @@ -969,33 +958,33 @@ impl Inner { Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_COMMIT) } - fn validate_catch_up_message(&mut self, who: &PeerId, full: &FullCatchUpMessage) - -> Action - { + fn validate_catch_up_message( + &mut self, + who: &PeerId, + full: &FullCatchUpMessage, + ) -> Action { match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, request, instant } => { if peer != who { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) } if request.set_id != full.set_id { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if request.round.0 > full.message.round_number { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } if 
full.message.prevotes.is_empty() || full.message.precommits.is_empty() { - return Action::Discard(cost::MALFORMED_CATCH_UP); + return Action::Discard(cost::MALFORMED_CATCH_UP) } // move request to pending processing state, we won't push out // any catch up requests until we import this one (either with a // success or failure). - self.pending_catch_up = PendingCatchUp::Processing { - instant: *instant, - }; + self.pending_catch_up = PendingCatchUp::Processing { instant: *instant }; // always discard catch up messages, they're point-to-point let topic = super::global_topic::(full.set_id.0); @@ -1036,15 +1025,14 @@ impl Inner { if request.set_id.0.saturating_add(1) == local_view.set_id.0 && local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 { - return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)) } - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } match self.peers.peer(who) { - None => - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), Some(peer) if peer.view.round >= request.round => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), _ => {}, @@ -1052,7 +1040,7 @@ impl Inner { let last_completed_round = set_state.read().last_completed_round(); if last_completed_round.number < request.round.0 { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) } trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", @@ -1123,10 +1111,8 @@ impl Inner { { // send catch up request if allowed let round = peer.view.round.0 - 1; // peer.view.round is > 0 - let request = CatchUpRequestMessage { - set_id: peer.view.set_id, - round: Round(round), - }; + let request = + CatchUpRequestMessage { set_id: peer.view.set_id, round: Round(round) }; let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); @@ -1146,16 +1132,17 @@ impl Inner { (catch_up, report) } - fn import_neighbor_message(&mut self, who: &PeerId, update: NeighborPacket>) - -> (Vec, Action, Option>, Option) - { + fn import_neighbor_message( + &mut self, + who: &PeerId, + update: NeighborPacket>, + ) -> (Vec, Action, Option>, Option) { let update_res = self.peers.update_peer_state(who, update); let (cost_benefit, topics) = match update_res { Ok(view) => (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), - Err(misbehavior) => - (misbehavior.cost(), None), + Err(misbehavior) => (misbehavior.cost(), None), }; let (catch_up, report) = match update_res { @@ -1207,14 +1194,14 @@ impl Inner { let report = match &self.pending_catch_up { PendingCatchUp::Requesting { who: peer, instant, .. } => if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { - return (false, None); + return (false, None) } else { // report peer for timeout Some((peer.clone(), cost::CATCH_UP_REQUEST_TIMEOUT)) }, PendingCatchUp::Processing { instant, .. 
} => if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { - return (false, None); + return (false, None) } else { None }, @@ -1246,19 +1233,16 @@ impl Inner { }; if self.config.local_role.is_light() { - return false; + return false } if round_elapsed < round_duration.mul_f32(PROPAGATION_SOME) { self.peers.first_stage_peers.contains(who) } else if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) - || self.peers.second_stage_peers.contains(who) + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) } else { - self.peers - .peer(who) - .map(|info| !info.roles.is_light()) - .unwrap_or(false) + self.peers.peer(who).map(|info| !info.roles.is_light()).unwrap_or(false) } } @@ -1283,13 +1267,13 @@ impl Inner { }; if self.config.local_role.is_light() { - return false; + return false } if round_elapsed < round_duration.mul_f32(PROPAGATION_ALL) { - self.peers.first_stage_peers.contains(who) - || self.peers.second_stage_peers.contains(who) - || self.peers.lucky_light_peers.contains(who) + self.peers.first_stage_peers.contains(who) || + self.peers.second_stage_peers.contains(who) || + self.peers.lucky_light_peers.contains(who) } else { true } @@ -1302,15 +1286,17 @@ pub(crate) struct Metrics { } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { Ok(Self { messages_validated: register( CounterVec::new( Opts::new( "finality_grandpa_communication_gossip_validator_messages", - "Number of messages validated by the finality grandpa gossip validator." + "Number of messages validated by the finality grandpa gossip validator.", ), - &["message", "action"] + &["message", "action"], )?, registry, )?, @@ -1336,7 +1322,7 @@ impl GossipValidator { set_state: environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, telemetry: Option, - ) -> (GossipValidator, TracingUnboundedReceiver) { + ) -> (GossipValidator, TracingUnboundedReceiver) { let metrics = match prometheus_registry.map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { @@ -1360,7 +1346,8 @@ impl GossipValidator { /// Note a round in the current set has started. pub(super) fn note_round(&self, round: Round, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) + where + F: FnOnce(Vec, NeighborPacket>), { let maybe_msg = self.inner.write().note_round(round); if let Some((to, msg)) = maybe_msg { @@ -1371,7 +1358,8 @@ impl GossipValidator { /// Note that a voter set with given ID has started. Updates the current set to given /// value and initializes the round to 0. pub(super) fn note_set(&self, set_id: SetId, authorities: Vec, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) + where + F: FnOnce(Vec, NeighborPacket>), { let maybe_msg = self.inner.write().note_set(set_id, authorities); if let Some((to, msg)) = maybe_msg { @@ -1386,14 +1374,10 @@ impl GossipValidator { set_id: SetId, finalized: NumberFor, send_neighbor: F, - ) - where F: FnOnce(Vec, NeighborPacket>) + ) where + F: FnOnce(Vec, NeighborPacket>), { - let maybe_msg = self.inner.write().note_commit_finalized( - round, - set_id, - finalized, - ); + let maybe_msg = self.inner.write().note_commit_finalized(round, set_id, finalized); if let Some((to, msg)) = maybe_msg { send_neighbor(to, msg); @@ -1401,7 +1385,7 @@ impl GossipValidator { } /// Note that we've processed a catch up message. 
- pub(super) fn note_catch_up_message_processed(&self) { + pub(super) fn note_catch_up_message_processed(&self) { self.inner.write().note_catch_up_message_processed(); } @@ -1409,9 +1393,11 @@ impl GossipValidator { let _ = self.report_sender.unbounded_send(PeerReport { who, cost_benefit }); } - pub(super) fn do_validate(&self, who: &PeerId, mut data: &[u8]) - -> (Action, Vec, Option>) - { + pub(super) fn do_validate( + &self, + who: &PeerId, + mut data: &[u8], + ) -> (Action, Vec, Option>) { let mut broadcast_topics = Vec::new(); let mut peer_reply = None; @@ -1430,10 +1416,10 @@ impl GossipValidator { }, Ok(GossipMessage::Neighbor(update)) => { message_name = Some("neighbor"); - let (topics, action, catch_up, report) = self.inner.write().import_neighbor_message( - who, - update.into_neighbor_packet(), - ); + let (topics, action, catch_up, report) = self + .inner + .write() + .import_neighbor_message(who, update.into_neighbor_packet()); if let Some((peer, cost_benefit)) = report { self.report(peer, cost_benefit); @@ -1442,22 +1428,19 @@ impl GossipValidator { broadcast_topics = topics; peer_reply = catch_up; action - } + }, Ok(GossipMessage::CatchUp(ref message)) => { message_name = Some("catch_up"); self.inner.write().validate_catch_up_message(who, message) }, Ok(GossipMessage::CatchUpRequest(request)) => { message_name = Some("catch_up_request"); - let (reply, action) = self.inner.write().handle_catch_up_request( - who, - request, - &self.set_state, - ); + let (reply, action) = + self.inner.write().handle_catch_up_request(who, request, &self.set_state); peer_reply = reply; action - } + }, Err(e) => { message_name = None; debug!(target: "afg", "Error decoding message: {}", e); @@ -1470,7 +1453,7 @@ impl GossipValidator { let len = std::cmp::min(i32::MAX as usize, data.len()) as i32; Action::Discard(Misbehavior::UndecodablePacket(len).cost()) - } + }, } }; @@ -1494,17 +1477,20 @@ impl GossipValidator { } impl sc_network_gossip::Validator for GossipValidator { - fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: ObservedRole) { + fn new_peer( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + roles: ObservedRole, + ) { let packet = { let mut inner = self.inner.write(); inner.peers.new_peer(who.clone(), roles); - inner.local_view.as_ref().map(|v| { - NeighborPacket { - round: v.round, - set_id: v.set_id, - commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), - } + inner.local_view.as_ref().map(|v| NeighborPacket { + round: v.round, + set_id: v.set_id, + commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), }) }; @@ -1540,15 +1526,15 @@ impl sc_network_gossip::Validator for GossipValidator { self.report(who.clone(), cb); sc_network_gossip::ValidationResult::ProcessAndDiscard(topic) - } + }, Action::Discard(cb) => { self.report(who.clone(), cb); sc_network_gossip::ValidationResult::Discard - } + }, } } @@ -1573,7 +1559,7 @@ impl sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator sc_network_gossip::Validator for GossipValidator false, Ok(GossipMessage::CatchUpRequest(_)) => false, Ok(GossipMessage::CatchUp(_)) => false, @@ -1638,7 +1625,7 @@ impl sc_network_gossip::Validator for GossipValidator return true, - Some((Some(_), _)) => return false, // round messages don't require further checking. + Some((Some(_), _)) => return false, /* round messages don't require further checking. 
*/ Some((None, _)) => {}, }; @@ -1652,11 +1639,10 @@ impl sc_network_gossip::Validator for GossipValidator true, Ok(GossipMessage::Commit(full)) => match local_view.last_commit { Some((number, round, set_id)) => - // we expire any commit message that doesn't target the same block - // as our best commit or isn't from the same round and set id + // we expire any commit message that doesn't target the same block + // as our best commit or isn't from the same round and set id !(full.message.target_number == number && - full.round == round && - full.set_id == set_id), + full.round == round && full.set_id == set_id), None => true, }, Ok(_) => true, @@ -1673,8 +1659,7 @@ pub(super) struct PeerReport { #[cfg(test)] mod tests { - use super::*; - use super::environment::SharedVoterSetState; + use super::{environment::SharedVoterSetState, *}; use sc_network::config::Role; use sc_network_gossip::Validator as GossipValidatorT; use sc_network_test::Block; @@ -1695,19 +1680,14 @@ mod tests { // dummy voter set state fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; + use crate::{authorities::AuthoritySet, environment::VoterSetState}; let base = (H256::zero(), 0); let voters = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; let voters = AuthoritySet::genesis(voters).unwrap(); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); + let set_state = VoterSetState::live(0, &voters, base); set_state.into() } @@ -1752,11 +1732,8 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - let update = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50, - }; + let update = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50 }; let res = peers.update_peer_state(&id, update.clone()); assert!(res.unwrap().is_none()); @@ -1771,29 +1748,17 @@ mod tests { #[test] fn update_peer_state() { - let update1 = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50u32, - }; + let update1 = + NeighborPacket { round: Round(5), set_id: SetId(10), commit_finalized_height: 50u32 }; - let update2 = NeighborPacket { - round: Round(6), - set_id: SetId(10), - commit_finalized_height: 60, - }; + let update2 = + NeighborPacket { round: Round(6), set_id: SetId(10), commit_finalized_height: 60 }; - let update3 = NeighborPacket { - round: Round(2), - set_id: SetId(11), - commit_finalized_height: 61, - }; + let update3 = + NeighborPacket { round: Round(2), set_id: SetId(11), commit_finalized_height: 61 }; - let update4 = NeighborPacket { - round: Round(3), - set_id: SetId(11), - commit_finalized_height: 80, - }; + let update4 = + NeighborPacket { round: Round(3), set_id: SetId(11), commit_finalized_height: 80 }; let mut peers = Peers::default(); let id = PeerId::random(); @@ -1820,11 +1785,13 @@ mod tests { let id = PeerId::random(); peers.new_peer(id.clone(), ObservedRole::Authority); - peers.update_peer_state(&id, NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 10, - }).unwrap().unwrap(); + peers + .update_peer_state( + &id, + NeighborPacket { round: Round(10), set_id: SetId(10), commit_finalized_height: 10 }, + ) + .unwrap() + .unwrap(); let mut check_update = move |update: NeighborPacket<_>| { let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); @@ -1853,12 +1820,7 @@ mod tests { #[test] fn messages_not_expired_immediately() { - let (val, _) = GossipValidator::::new( - config(), - 
voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; @@ -1890,12 +1852,7 @@ mod tests { fn message_from_unknown_authority_discarded() { assert!(cost::UNKNOWN_VOTER != cost::BAD_SIGNATURE); - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); let peer = PeerId::random(); @@ -1904,31 +1861,37 @@ mod tests { val.note_round(Round(1), |_, _| {}); let inner = val.inner.read(); - let unknown_voter = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: AuthorityId::from_slice(&[2u8; 32]), - } - }); + let unknown_voter = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: AuthorityId::from_slice(&[2u8; 32]), + }, + }, + ); - let bad_sig = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: auth.clone(), - } - }); + let bad_sig = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: auth.clone(), + }, + }, + ); assert_eq!(unknown_voter, Action::Discard(cost::UNKNOWN_VOTER)); assert_eq!(bad_sig, Action::Discard(cost::BAD_SIGNATURE)); @@ -1936,12 +1899,7 @@ mod tests { #[test] fn unsolicited_catch_up_messages_discarded() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); @@ -1952,16 +1910,19 @@ mod tests { let validate_catch_up = || { let mut inner = val.inner.write(); - inner.validate_catch_up_message(&peer, &FullCatchUpMessage { - set_id: SetId(set_id), - message: finality_grandpa::CatchUp { - round_number: 10, - prevotes: Default::default(), - precommits: Default::default(), - base_hash: Default::default(), - base_number: Default::default(), - } - }) + inner.validate_catch_up_message( + &peer, + &FullCatchUpMessage { + set_id: SetId(set_id), + message: finality_grandpa::CatchUp { + round_number: 10, + prevotes: Default::default(), + precommits: Default::default(), + base_hash: Default::default(), + base_number: Default::default(), + }, + }, + ) }; // the catch up is discarded because we have no pending request @@ -1969,10 +1930,7 @@ mod tests { let noted = val.inner.write().note_catch_up_request( &peer, - &CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - } + &CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, ); assert!(noted.0); 
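
The tests around this point exercise the single-slot catch-up bookkeeping: at most one request is in flight, a reply is only accepted from the peer it was sent to, and the slot is released on timeout. A reduced, self-contained model of that state machine; the single timeout below is an invented stand-in, the real code distinguishes `CATCH_UP_REQUEST_TIMEOUT` from `CATCH_UP_PROCESS_TIMEOUT`:

    use std::time::{Duration, Instant};

    const REQUEST_TIMEOUT: Duration = Duration::from_secs(45); // invented value

    enum PendingCatchUp {
        /// Idle: a new request may be issued.
        None,
        /// Sent to a specific peer, awaiting its reply.
        Requesting { instant: Instant },
        /// Reply received and being imported; block new requests meanwhile.
        Processing { instant: Instant },
    }

    impl PendingCatchUp {
        /// A new request is allowed when idle, or when the previous one has
        /// been outstanding longer than the timeout.
        fn can_request(&self, now: Instant) -> bool {
            match self {
                PendingCatchUp::None => true,
                PendingCatchUp::Requesting { instant } |
                PendingCatchUp::Processing { instant } =>
                    now.duration_since(*instant) > REQUEST_TIMEOUT,
            }
        }
    }

    fn main() {
        let state = PendingCatchUp::Requesting { instant: Instant::now() };
        // The slot is occupied, so a second request is suppressed.
        assert!(!state.can_request(Instant::now()));
    }
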
@@ -1998,20 +1956,13 @@ mod tests { let mut current_rounds = environment::CurrentRounds::new(); current_rounds.insert(3, environment::HasVoted::No); - let set_state = environment::VoterSetState::::Live { - completed_rounds, - current_rounds, - }; + let set_state = + environment::VoterSetState::::Live { completed_rounds, current_rounds }; set_state.into() }; - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); let set_id = 1; let auth = AuthorityId::from_slice(&[1u8; 32]); @@ -2027,10 +1978,7 @@ mod tests { let res = inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(10) }, &set_state, ); @@ -2040,10 +1988,7 @@ mod tests { let res = inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(2), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(2) }, &set_state, ); @@ -2062,12 +2007,7 @@ mod tests { #[test] fn detects_honest_out_of_scope_catch_requests() { let set_state = voter_set_state(); - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None, None); // the validator starts at set id 2 val.note_set(SetId(2), Vec::new(), |_, _| {}); @@ -2081,10 +2021,7 @@ mod tests { let mut inner = val.inner.write(); inner.handle_catch_up_request( &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(round), - }, + CatchUpRequestMessage { set_id: SetId(set_id), round: Round(round) }, &set_state, ) }; @@ -2104,51 +2041,28 @@ mod tests { // the validator is at set id 2 and round 0. requests for set id 1 // should not be answered but they should be considered an honest // mistake - assert_res( - send_request(1, 1), - true, - ); + assert_res(send_request(1, 1), true); - assert_res( - send_request(1, 10), - true, - ); + assert_res(send_request(1, 10), true); // requests for set id 0 should be considered out of scope - assert_res( - send_request(0, 1), - false, - ); + assert_res(send_request(0, 1), false); - assert_res( - send_request(0, 10), - false, - ); + assert_res(send_request(0, 10), false); // after the validator progresses further than CATCH_UP_THRESHOLD in set // id 2, any request for set id 1 should no longer be considered an // honest mistake. val.note_round(Round(3), |_, _| {}); - assert_res( - send_request(1, 1), - false, - ); + assert_res(send_request(1, 1), false); - assert_res( - send_request(1, 2), - false, - ); + assert_res(send_request(1, 2), false); } #[test] fn issues_catch_up_request_on_neighbor_packet_import() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2218,12 +2132,7 @@ mod tests { c }; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2238,11 +2147,7 @@ mod tests { // we should get `None`. 
let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); match catch_up_request { @@ -2253,12 +2158,7 @@ mod tests { #[test] fn doesnt_send_catch_up_requests_to_non_authorities_when_observer_enabled() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2268,17 +2168,16 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_authority.clone(), ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(peer_authority.clone(), ObservedRole::Authority); val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); catch_up_request @@ -2314,12 +2213,7 @@ mod tests { c }; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator starts at set id 1. val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2331,11 +2225,7 @@ mod tests { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, + NeighborPacket { round: Round(42), set_id: SetId(1), commit_finalized_height: 50 }, ); // importing a neighbor message from a peer in the same set in a later @@ -2354,12 +2244,7 @@ mod tests { #[test] fn doesnt_expire_next_round_messages() { // NOTE: this is a regression test - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None, None); // the validator starts at set id 1. 
val.note_set(SetId(1), Vec::new(), |_, _| {}); @@ -2373,12 +2258,7 @@ mod tests { // we accept messages from rounds 9, 10 and 11 // therefore neither of those should be considered expired for round in &[9, 10, 11] { - assert!( - !is_expired( - crate::communication::round_topic::(*round, 1), - &[], - ) - ) + assert!(!is_expired(crate::communication::round_topic::(*round, 1), &[],)) } } @@ -2388,12 +2268,7 @@ mod tests { config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race let round_duration = config.gossip_duration * ROUND_DURATION; - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - None, - ); + let (val, _) = GossipValidator::::new(config, voter_set_state(), None, None); // the validator start at set id 0 val.note_set(SetId(0), Vec::new(), |_, _| {}); @@ -2411,10 +2286,7 @@ mod tests { .peers .new_peer(authorities[i].clone(), ObservedRole::Authority); - val.inner - .write() - .peers - .new_peer(full_nodes[i].clone(), ObservedRole::Full); + val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); } let test = |rounds_elapsed, peers| { @@ -2458,11 +2330,7 @@ mod tests { sum / n } - let all_peers = authorities - .iter() - .chain(full_nodes.iter()) - .cloned() - .collect(); + let all_peers = authorities.iter().chain(full_nodes.iter()).cloned().collect(); // on the first attempt we will only gossip to 4 peers, either // authorities or full nodes, but we'll guarantee that half of those @@ -2497,10 +2365,7 @@ mod tests { // add a new light client as peer let light_peer = PeerId::random(); - val.inner - .write() - .peers - .new_peer(light_peer.clone(), ObservedRole::Light); + val.inner.write().peers.new_peer(light_peer.clone(), ObservedRole::Light); assert!(!val.message_allowed()( &light_peer, @@ -2529,11 +2394,7 @@ mod tests { .peers .update_peer_state( &light_peer, - NeighborPacket { - round: Round(1), - set_id: SetId(0), - commit_finalized_height: 1, - }, + NeighborPacket { round: Round(1), set_id: SetId(0), commit_finalized_height: 1 }, ) .unwrap(); @@ -2576,30 +2437,20 @@ mod tests { // add a new peer at set id 1 let peer1 = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer1.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer1.clone(), ObservedRole::Authority); val.inner .write() .peers .update_peer_state( &peer1, - NeighborPacket { - round: Round(1), - set_id: SetId(1), - commit_finalized_height: 1, - }, + NeighborPacket { round: Round(1), set_id: SetId(1), commit_finalized_height: 1 }, ) .unwrap(); // peer2 will default to set id 0 let peer2 = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer2.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer2.clone(), ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 @@ -2677,22 +2528,15 @@ mod tests { // a commit message for round 1 that finalizes the same height as we // have observed previously should not be expired - assert!(!message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 2), - )); + assert!( + !message_expired(crate::communication::global_topic::(1), &commit(1, 1, 2),) + ); // it should be expired if it is for a lower block - assert!(message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 1), - )); + assert!(message_expired(crate::communication::global_topic::(1), &commit(1, 1, 1),)); // or the same block height but from the previous round - assert!(message_expired( - 
-			crate::communication::global_topic::<Block>(1),
-			&commit(0, 1, 2),
-		));
+		assert!(message_expired(crate::communication::global_topic::<Block>(1), &commit(0, 1, 2),));
 	}

 	#[test]
diff --git a/substrate/client/finality-grandpa/src/communication/mod.rs b/substrate/client/finality-grandpa/src/communication/mod.rs
index 45bc72223e4b5e04484460023a29ad11178ccfa2..077dc6a3f96b3835afc065a080089c3604c534e3 100644
--- a/substrate/client/finality-grandpa/src/communication/mod.rs
+++ b/substrate/client/finality-grandpa/src/communication/mod.rs
@@ -29,37 +29,36 @@
 //! In the future, there will be a fallback for allowing sending the same message
 //! under certain conditions that are used to un-stick the protocol.

-use futures::{prelude::*, channel::mpsc};
+use futures::{channel::mpsc, prelude::*};
 use log::{debug, trace};
 use parking_lot::Mutex;
 use prometheus_endpoint::Registry;
-use std::{pin::Pin, sync::Arc, task::{Context, Poll}};
+use std::{
+	pin::Pin,
+	sync::Arc,
+	task::{Context, Poll},
+};

-use sp_keystore::SyncCryptoStorePtr;
-use finality_grandpa::Message::{Prevote, Precommit, PrimaryPropose};
-use finality_grandpa::{voter, voter_set::VoterSet};
+use finality_grandpa::{
+	voter,
+	voter_set::VoterSet,
+	Message::{Precommit, Prevote, PrimaryPropose},
+};
+use parity_scale_codec::{Decode, Encode};
 use sc_network::{NetworkService, ReputationChange};
 use sc_network_gossip::{GossipEngine, Network as GossipNetwork};
-use parity_scale_codec::{Encode, Decode};
-use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor};
 use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO};
+use sp_keystore::SyncCryptoStorePtr;
+use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor};

 use crate::{
-	CatchUp, Commit, CommunicationIn, CommunicationOutH,
-	CompactCommit, Error, Message, SignedMessage,
+	environment::HasVoted, CatchUp, Commit, CommunicationIn, CommunicationOutH, CompactCommit,
+	Error, Message, SignedMessage,
 };
-use crate::environment::HasVoted;
 use gossip::{
-	FullCatchUpMessage,
-	FullCommitMessage,
-	GossipMessage,
-	GossipValidator,
-	PeerReport,
-	VoteMessage,
-};
-use sp_finality_grandpa::{
-	AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber,
+	FullCatchUpMessage, FullCommitMessage, GossipMessage, GossipValidator, PeerReport, VoteMessage,
 };
+use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId as SetIdNumber};
 use sp_utils::mpsc::TracingUnboundedReceiver;

 pub mod gossip;
@@ -89,11 +88,13 @@ mod cost {
 	pub(super) const INVALID_CATCH_UP: Rep = Rep::new(-5000, "Grandpa: Invalid catch-up");
 	pub(super) const INVALID_COMMIT: Rep = Rep::new(-5000, "Grandpa: Invalid commit");
 	pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Out-of-scope message");
-	pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = Rep::new(-200, "Grandpa: Catch-up request timeout");
+	pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep =
+		Rep::new(-200, "Grandpa: Catch-up request timeout");

 	// cost of answering a catch up request
 	pub(super) const CATCH_UP_REPLY: Rep = Rep::new(-200, "Grandpa: Catch-up reply");
-	pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = Rep::new(-200, "Grandpa: Out-of-scope catch-up");
+	pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep =
+		Rep::new(-200, "Grandpa: Out-of-scope catch-up");
 }

 // benefit scalars for reporting peers.
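
For context on the constants above: each reputation change pairs a signed magnitude with a static reason string, and the reason surfaces in logs when the report is applied. A stand-in sketch; this `Rep` mimics `sc_network::ReputationChange`, and the positive benefit value shown is invented:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Rep {
        value: i32,
        reason: &'static str,
    }

    impl Rep {
        const fn new(value: i32, reason: &'static str) -> Self {
            Rep { value, reason }
        }
    }

    // Negative values penalize a peer, positive values reward it.
    const CATCH_UP_REQUEST_TIMEOUT: Rep = Rep::new(-200, "Grandpa: Catch-up request timeout");
    const NEIGHBOR_MESSAGE: Rep = Rep::new(100, "Grandpa: Neighbor message"); // invented value

    fn main() {
        assert!(CATCH_UP_REQUEST_TIMEOUT.value < 0 && NEIGHBOR_MESSAGE.value > 0);
    }
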
@@ -144,14 +145,25 @@ pub trait Network: GossipNetwork + Clone + Send + 'static /// If the given vector of peers is empty then the underlying implementation /// should make a best effort to fetch the block from any peers it is /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl Network for Arc> where +impl Network for Arc> +where B: BlockT, H: sc_network::ExHashT, { - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { NetworkService::set_sync_fork_request(self, peers, hash, number) } } @@ -179,14 +191,12 @@ pub(crate) struct NetworkBridge> { neighbor_sender: periodic::NeighborPacketSender, /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. - // // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, // thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. neighbor_packet_worker: Arc>>, /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the /// gossip engine. - // // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer @@ -210,12 +220,8 @@ impl> NetworkBridge { prometheus_registry: Option<&Registry>, telemetry: Option, ) -> Self { - let (validator, report_stream) = GossipValidator::new( - config, - set_state.clone(), - prometheus_registry, - telemetry.clone(), - ); + let (validator, report_stream) = + GossipValidator::new(config, set_state.clone(), prometheus_registry, telemetry.clone()); let validator = Arc::new(validator); let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( @@ -239,18 +245,13 @@ impl> NetworkBridge { validator.note_round(Round(round.number), |_, _| {}); for signed in round.votes.iter() { - let message = gossip::GossipMessage::Vote( - gossip::VoteMessage:: { - message: signed.clone(), - round: Round(round.number), - set_id: SetId(set_id), - } - ); + let message = gossip::GossipMessage::Vote(gossip::VoteMessage:: { + message: signed.clone(), + round: Round(round.number), + set_id: SetId(set_id), + }); - gossip_engine.lock().register_gossip_message( - topic, - message.encode(), - ); + gossip_engine.lock().register_gossip_message(topic, message.encode()); } trace!(target: "afg", @@ -263,7 +264,8 @@ impl> NetworkBridge { } } - let (neighbor_packet_worker, neighbor_packet_sender) = periodic::NeighborPacketWorker::new(); + let (neighbor_packet_worker, neighbor_packet_sender) = + periodic::NeighborPacketWorker::new(); NetworkBridge { service, @@ -277,12 +279,7 @@ impl> NetworkBridge { } /// Note the beginning of a new round to the `GossipValidator`. - pub(crate) fn note_round( - &self, - round: Round, - set_id: SetId, - voters: &VoterSet, - ) { + pub(crate) fn note_round(&self, round: Round, set_id: SetId, voters: &VoterSet) { // is a no-op if currently in that set. 
self.validator.note_set( set_id, @@ -290,10 +287,8 @@ impl> NetworkBridge { |to, neighbor| self.neighbor_sender.send(to, neighbor), ); - self.validator.note_round( - round, - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); + self.validator + .note_round(round, |to, neighbor| self.neighbor_sender.send(to, neighbor)); } /// Get a stream of signature-checked round messages from the network as well as a sink for round messages to the @@ -305,15 +300,8 @@ impl> NetworkBridge { set_id: SetId, voters: Arc>, has_voted: HasVoted, - ) -> ( - impl Stream> + Unpin, - OutgoingMessages, - ) { - self.note_round( - round, - set_id, - &*voters, - ); + ) -> (impl Stream> + Unpin, OutgoingMessages) { + self.note_round(round, set_id, &*voters); let keystore = keystore.and_then(|ks| { let id = ks.local_id(); @@ -326,20 +314,20 @@ impl> NetworkBridge { let topic = round_topic::(round.0, set_id.0); let telemetry = self.telemetry.clone(); - let incoming = self.gossip_engine.lock().messages_for(topic) - .filter_map(move |notification| { + let incoming = + self.gossip_engine.lock().messages_for(topic).filter_map(move |notification| { let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); match decoded { Err(ref e) => { debug!(target: "afg", "Skipping malformed message {:?}: {}", notification, e); future::ready(None) - } + }, Ok(GossipMessage::Vote(msg)) => { // check signature. if !voters.contains(&msg.message.id) { debug!(target: "afg", "Skipping message from unknown voter {}", msg.message.id); - return future::ready(None); + return future::ready(None) } if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { @@ -378,11 +366,11 @@ impl> NetworkBridge { } future::ready(Some(msg.message)) - } + }, _ => { debug!(target: "afg", "Skipping unknown message type"); future::ready(None) - } + }, } }); @@ -458,7 +446,7 @@ impl> NetworkBridge { &self, peers: Vec, hash: B::Hash, - number: NumberFor + number: NumberFor, ) { Network::set_sync_fork_request(&self.service, peers, hash, number) } @@ -473,9 +461,10 @@ impl> Future for NetworkBridge { Poll::Ready(Some((to, packet))) => { self.gossip_engine.lock().send_message(to, packet.encode()); }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Neighbor packet worker stream closed.".into())) - ), + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Neighbor packet worker stream closed.".into(), + ))), Poll::Pending => break, } } @@ -485,17 +474,17 @@ impl> Future for NetworkBridge { Poll::Ready(Some(PeerReport { who, cost_benefit })) => { self.gossip_engine.lock().report(who, cost_benefit); }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Gossip validator report stream closed.".into())) - ), + Poll::Ready(None) => + return Poll::Ready(Err(Error::Network( + "Gossip validator report stream closed.".into(), + ))), Poll::Pending => break, } } match self.gossip_engine.lock().poll_unpin(cx) { - Poll::Ready(()) => return Poll::Ready( - Err(Error::Network("Gossip engine future finished.".into())) - ), + Poll::Ready(()) => + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))), Poll::Pending => {}, } @@ -513,18 +502,14 @@ fn incoming_global( ) -> impl Stream> { let process_commit = { let telemetry = telemetry.clone(); - move | - msg: FullCommitMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { + move |msg: FullCommitMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: 
&Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { if voters.len().get() <= TELEMETRY_VOTERS_LIMIT { let precommits_signed_by: Vec = - msg.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); + msg.message.auth_data.iter().map(move |(_, a)| format!("{}", a)).collect(); telemetry!( telemetry; @@ -547,7 +532,7 @@ fn incoming_global( gossip_engine.lock().report(who, cost); } - return None; + return None } let round = msg.round; @@ -570,13 +555,13 @@ fn incoming_global( ); gossip_engine.lock().gossip_message(topic, notification.message.clone(), false); - } + }, voter::CommitProcessingOutcome::Bad(_) => { // report peer and do not gossip. if let Some(who) = notification.sender.take() { gossip_engine.lock().report(who, cost::INVALID_COMMIT); } - } + }, }; let cb = voter::Callback::Work(Box::new(cb)); @@ -585,27 +570,21 @@ fn incoming_global( } }; - let process_catch_up = move | - msg: FullCatchUpMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { + let process_catch_up = move |msg: FullCatchUpMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { let gossip_validator = gossip_validator.clone(); let gossip_engine = gossip_engine.clone(); - if let Err(cost) = check_catch_up::( - &msg.message, - voters, - msg.set_id, - telemetry.clone(), - ) { + if let Err(cost) = check_catch_up::(&msg.message, voters, msg.set_id, telemetry.clone()) + { if let Some(who) = notification.sender { gossip_engine.lock().report(who, cost); } - return None; + return None } let cb = move |outcome| { @@ -624,7 +603,10 @@ fn incoming_global( Some(voter::CommunicationIn::CatchUp(msg.message, cb)) }; - gossip_engine.clone().lock().messages_for(topic) + gossip_engine + .clone() + .lock() + .messages_for(topic) .filter_map(|notification| { // this could be optimized by decoding piecewise. let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -642,7 +624,7 @@ fn incoming_global( _ => { debug!(target: "afg", "Skipping unknown message type"); None - } + }, }) }) } @@ -688,15 +670,15 @@ pub(crate) struct OutgoingMessages { impl Unpin for OutgoingMessages {} -impl Sink> for OutgoingMessages -{ +impl Sink> for OutgoingMessages { type Error = Error; fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_ready(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { + Sink::poll_ready(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { Error::Network(format!("Failed to poll_ready channel sender: {:?}", e)) - })}) + }) + }) } fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { @@ -725,11 +707,13 @@ impl Sink> for OutgoingMessages keystore.local_id().clone(), self.round, self.set_id, - ).ok_or_else( - || Error::Signing(format!( - "Failed to sign GRANDPA vote for round {} targetting {:?}", self.round, target_hash + ) + .ok_or_else(|| { + Error::Signing(format!( + "Failed to sign GRANDPA vote for round {} targetting {:?}", + self.round, target_hash )) - )?; + })?; let message = GossipMessage::Vote(VoteMessage:: { message: signed.clone(), @@ -762,7 +746,7 @@ impl Sink> for OutgoingMessages // forward the message to the inner sender. 
return self.sender.start_send(signed).map_err(|e| { Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) - }); + }) }; Ok(()) @@ -773,10 +757,11 @@ impl Sink> for OutgoingMessages } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_close(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { + Sink::poll_close(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { Error::Network(format!("Failed to poll_close channel sender: {:?}", e)) - })}) + }) + }) } } @@ -799,23 +784,22 @@ fn check_compact_commit( if let Some(weight) = voters.get(id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } else { debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_COMMIT); + return Err(cost::MALFORMED_COMMIT) } // check signatures on all contained precommits. let mut buf = Vec::new(); - for (i, (precommit, &(ref sig, ref id))) in msg.precommits.iter() - .zip(&msg.auth_data) - .enumerate() + for (i, (precommit, &(ref sig, ref id))) in + msg.precommits.iter().zip(&msg.auth_data).enumerate() { use crate::communication::gossip::Misbehavior; use finality_grandpa::Message as GrandpaMessage; @@ -839,9 +823,10 @@ fn check_compact_commit( signatures_checked: i as i32, blocks_loaded: 0, equivocations_caught: 0, - }.cost(); + } + .cost(); - return Err(cost); + return Err(cost) } } @@ -863,7 +848,7 @@ fn check_catch_up( // check total weight is not out of range for a set of votes. fn check_weight<'a>( voters: &'a VoterSet, - votes: impl Iterator, + votes: impl Iterator, full_threshold: u64, ) -> Result<(), ReputationChange> { let mut total_weight = 0; @@ -872,32 +857,24 @@ fn check_catch_up( if let Some(weight) = voters.get(&id).map(|info| info.weight()) { total_weight += weight.get(); if total_weight > full_threshold { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } else { debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } } if total_weight < voters.threshold().get() { - return Err(cost::MALFORMED_CATCH_UP); + return Err(cost::MALFORMED_CATCH_UP) } Ok(()) } - check_weight( - voters, - msg.prevotes.iter().map(|vote| &vote.id), - full_threshold, - )?; + check_weight(voters, msg.prevotes.iter().map(|vote| &vote.id), full_threshold)?; - check_weight( - voters, - msg.precommits.iter().map(|vote| &vote.id), - full_threshold, - )?; + check_weight(voters, msg.precommits.iter().map(|vote| &vote.id), full_threshold)?; fn check_signatures<'a, B, I>( messages: I, @@ -906,9 +883,10 @@ fn check_catch_up( mut signatures_checked: usize, buf: &mut Vec, telemetry: Option, - ) -> Result where + ) -> Result + where B: BlockT, - I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, { use crate::communication::gossip::Misbehavior; @@ -916,12 +894,7 @@ fn check_catch_up( signatures_checked += 1; if !sp_finality_grandpa::check_message_signature_with_buffer( - &msg, - id, - sig, - round, - set_id, - buf, + &msg, id, sig, round, set_id, buf, ) { debug!(target: "afg", "Bad catch up message signature {}", id); telemetry!( @@ -933,9 +906,10 @@ fn check_catch_up( let cost = 
Misbehavior::BadCatchUpMessage { signatures_checked: signatures_checked as i32, - }.cost(); + } + .cost(); - return Err(cost); + return Err(cost) } } @@ -959,7 +933,11 @@ fn check_catch_up( // check signatures on all contained precommits. let _ = check_signatures::( msg.precommits.iter().map(|vote| { - (finality_grandpa::Message::Precommit(vote.precommit.clone()), &vote.id, &vote.signature) + ( + finality_grandpa::Message::Precommit(vote.precommit.clone()), + &vote.id, + &vote.signature, + ) }), msg.round_number, set_id.0, @@ -1009,9 +987,12 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { Poll::Ready(Ok(())) } - fn start_send(self: Pin<&mut Self>, input: (RoundNumber, Commit)) -> Result<(), Self::Error> { + fn start_send( + self: Pin<&mut Self>, + input: (RoundNumber, Commit), + ) -> Result<(), Self::Error> { if !self.is_voter { - return Ok(()); + return Ok(()) } let (round, commit) = input; @@ -1024,7 +1005,9 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, ); - let (precommits, auth_data) = commit.precommits.into_iter() + let (precommits, auth_data) = commit + .precommits + .into_iter() .map(|signed| (signed.precommit, (signed.signature, signed.id))) .unzip(); @@ -1032,7 +1015,7 @@ impl Sink<(RoundNumber, Commit)> for CommitsOut { target_hash: commit.target_hash, target_number: commit.target_number, precommits, - auth_data + auth_data, }; let message = GossipMessage::Commit(FullCommitMessage:: { diff --git a/substrate/client/finality-grandpa/src/communication/periodic.rs b/substrate/client/finality-grandpa/src/communication/periodic.rs index 377882ed5dd2d5d4bb2136fbd5314e0d557b890c..a3c7b9380b25f299ee13bbff1f6da28e98983c3b 100644 --- a/substrate/client/finality-grandpa/src/communication/periodic.rs +++ b/substrate/client/finality-grandpa/src/communication/periodic.rs @@ -18,15 +18,19 @@ //! Periodic rebroadcast of neighbor packets. +use futures::{future::FutureExt as _, prelude::*, ready, stream::Stream}; use futures_timer::Delay; -use futures::{future::{FutureExt as _}, prelude::*, ready, stream::Stream}; use log::debug; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; +use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; -use sp_runtime::traits::{NumberFor, Block as BlockT}; -use super::gossip::{NeighborPacket, GossipMessage}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; // How often to rebroadcast, in cases where no new packets are created. const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); @@ -34,7 +38,7 @@ const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); /// A sender used to send neighbor packets to a background job. 
#[derive(Clone)] pub(super) struct NeighborPacketSender( - TracingUnboundedSender<(Vec, NeighborPacket>)> + TracingUnboundedSender<(Vec, NeighborPacket>)>, ); impl NeighborPacketSender { @@ -63,24 +67,20 @@ pub(super) struct NeighborPacketWorker { impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new() -> (Self, NeighborPacketSender){ - let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)> - ("mpsc_grandpa_neighbor_packet_worker"); + pub(super) fn new() -> (Self, NeighborPacketSender) { + let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( + "mpsc_grandpa_neighbor_packet_worker", + ); let delay = Delay::new(REBROADCAST_AFTER); - (NeighborPacketWorker { - last: None, - delay, - rx, - }, NeighborPacketSender(tx)) + (NeighborPacketWorker { last: None, delay, rx }, NeighborPacketSender(tx)) } } -impl Stream for NeighborPacketWorker { +impl Stream for NeighborPacketWorker { type Item = (Vec, GossipMessage); - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> - { + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = &mut *self; match this.rx.poll_next_unpin(cx) { Poll::Ready(None) => return Poll::Ready(None), @@ -88,8 +88,8 @@ impl Stream for NeighborPacketWorker { this.delay.reset(REBROADCAST_AFTER); this.last = Some((to.clone(), packet.clone())); - return Poll::Ready(Some((to, GossipMessage::::from(packet)))); - } + return Poll::Ready(Some((to, GossipMessage::::from(packet)))) + }, // Don't return yet, maybe the timer fired. Poll::Pending => {}, }; @@ -104,10 +104,10 @@ impl Stream for NeighborPacketWorker { // // Note: In case poll_unpin is called after the resetted delay fires again, this // will drop one tick. Deemed as very unlikely and also not critical. - while let Poll::Ready(()) = this.delay.poll_unpin(cx) {}; + while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} if let Some((ref to, ref packet)) = this.last { - return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))) } Poll::Pending diff --git a/substrate/client/finality-grandpa/src/communication/tests.rs b/substrate/client/finality-grandpa/src/communication/tests.rs index ec8c97dfe3e8a8e8ddecd89361545aefeeec765b..868186bbf0fddfb77e8b00942c293d3d7b6603b5 100644 --- a/substrate/client/finality-grandpa/src/communication/tests.rs +++ b/substrate/client/finality-grandpa/src/communication/tests.rs @@ -18,21 +18,26 @@ //! Tests for the communication portion of the GRANDPA crate. 
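
Before the communication tests: the `NeighborPacketWorker` reformatted above keeps the last packet it forwarded and re-emits it once `REBROADCAST_AFTER` elapses with no new traffic. A synchronous model of that policy; the real worker is an async `Stream` driven by `futures_timer::Delay`, and this sketch only captures the resend rule:

    use std::time::{Duration, Instant};

    const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60);

    struct Rebroadcaster<T: Clone> {
        last: Option<(T, Instant)>,
    }

    impl<T: Clone> Rebroadcaster<T> {
        /// A fresh packet resets the timer and becomes the rebroadcast candidate.
        fn on_new_packet(&mut self, packet: T, now: Instant) {
            self.last = Some((packet, now));
        }

        /// With no new packets, repeat the last one once the timer elapses.
        fn on_tick(&mut self, now: Instant) -> Option<T> {
            match &mut self.last {
                Some((packet, sent)) if now.duration_since(*sent) >= REBROADCAST_AFTER => {
                    *sent = now;
                    Some(packet.clone())
                },
                _ => None,
            }
        }
    }

    fn main() {
        let mut r = Rebroadcaster { last: None };
        let t0 = Instant::now();
        r.on_new_packet("neighbor-packet", t0);
        assert!(r.on_tick(t0 + Duration::from_secs(30)).is_none());
        assert!(r.on_tick(t0 + REBROADCAST_AFTER).is_some());
    }
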
-use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use super::{ + gossip::{self, GossipValidator}, + Round, SetId, VoterSet, +}; +use crate::{communication::GRANDPA_PROTOCOL_NAME, environment::SharedVoterSetState}; use futures::prelude::*; +use parity_scale_codec::Encode; use sc_network::{config::Role, Event as NetworkEvent, ObservedRole, PeerId}; -use sc_network_test::{Block, Hash}; use sc_network_gossip::Validator; -use std::sync::Arc; +use sc_network_test::{Block, Hash}; +use sp_finality_grandpa::AuthorityList; use sp_keyring::Ed25519Keyring; -use parity_scale_codec::Encode; use sp_runtime::traits::NumberFor; -use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; -use crate::communication::GRANDPA_PROTOCOL_NAME; -use crate::environment::SharedVoterSetState; -use sp_finality_grandpa::AuthorityList; -use super::gossip::{self, GossipValidator}; -use super::{VoterSet, Round, SetId}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + borrow::Cow, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; #[derive(Debug)] pub(crate) enum Event { @@ -79,13 +84,14 @@ impl super::Network for TestNetwork { _peers: Vec, _hash: Hash, _number: NumberFor, - ) {} + ) { + } } impl sc_network_gossip::ValidatorContext for TestNetwork { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { >::write_notification( @@ -96,7 +102,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { ); } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { @@ -107,15 +113,17 @@ pub(crate) struct Tester { impl Tester { fn filter_network_events(self, mut pred: F) -> impl Future - where F: FnMut(Event) -> bool + where + F: FnMut(Event) -> bool, { let mut s = Some(self); futures::future::poll_fn(move |cx| loop { match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { Poll::Ready(None) => panic!("concluded early"), - Poll::Ready(Some(item)) => if pred(item) { - return Poll::Ready(s.take().unwrap()) - }, + Poll::Ready(Some(item)) => + if pred(item) { + return Poll::Ready(s.take().unwrap()) + }, Poll::Pending => return Poll::Pending, } }) @@ -145,8 +153,7 @@ fn config() -> crate::Config { // dummy voter set state fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; + use crate::{authorities::AuthoritySet, environment::VoterSetState}; use finality_grandpa::round::State as RoundState; use sp_core::{crypto::Public, H256}; use sp_finality_grandpa::AuthorityId; @@ -157,20 +164,13 @@ fn voter_set_state() -> SharedVoterSetState { let voters = vec![(AuthorityId::from_slice(&[1; 32]), 1)]; let voters = AuthoritySet::genesis(voters).unwrap(); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); + let set_state = VoterSetState::live(0, &voters, base); set_state.into() } // needs to run in a tokio runtime. 
-pub(crate) fn make_test_network() -> ( - impl Future, - TestNetwork, -) { +pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; @@ -185,13 +185,7 @@ pub(crate) fn make_test_network() -> ( } } - let bridge = super::NetworkBridge::new( - net.clone(), - config(), - voter_set_state(), - None, - None, - ); + let bridge = super::NetworkBridge::new(net.clone(), config(), voter_set_state(), None, None); ( futures::future::ready(Tester { @@ -204,19 +198,16 @@ pub(crate) fn make_test_network() -> ( } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter() - .map(|key| key.clone().public().into()) - .map(|id| (id, 1)) - .collect() + keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() } struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) { } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} + fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } #[test] @@ -232,9 +223,12 @@ fn good_commit_leads_to_relay() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), ); let mut precommits = Vec::new(); @@ -247,24 +241,21 @@ fn good_commit_leads_to_relay() { auth_data.push((signature, public[i].0.clone())) } - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } }; let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { round: Round(round), set_id: SetId(set_id), message: commit, - }).encode(); + }) + .encode(); let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); - let test = make_test_network().0 + let test = make_test_network() + .0 .then(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); @@ -272,7 +263,8 @@ fn good_commit_leads_to_relay() { }) .then(move |(tester, id)| { // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); { let (action, ..) 
= tester.gossip_validator.do_validate(&id, &encoded_commit[..]); @@ -301,7 +293,10 @@ fn good_commit_leads_to_relay() { let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], + messages: vec![( + GRANDPA_PROTOCOL_NAME.into(), + commit_to_send.clone().into(), + )], }); // Add a random peer which will be the recipient of this message @@ -316,13 +311,11 @@ fn good_commit_leads_to_relay() { // Announce its local set has being on the current set id through a neighbor // packet, otherwise it won't be eligible to receive the commit let _ = { - let update = gossip::VersionedNeighborPacket::V1( - gossip::NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 1, - } - ); + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, + }); let msg = gossip::GossipMessage::::Neighbor(update); @@ -333,31 +326,27 @@ fn good_commit_leads_to_relay() { }; true - } + }, _ => false, }); // when the commit comes in, we'll tell the callback it was good. - let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); - }, - _ => panic!("commit expected"), - } - }); + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); + }, + _ => panic!("commit expected"), + }); // once the message is sent and commit is "handled" we should have // a repropagation event coming from the network. 
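The `commits_in.into_future()` chain above relies on `StreamExt::into_future`, which resolves to a pair of the first item and the remaining stream. A tiny runnable sketch of that idiom, with a plain integer standing in for the expected commit:

use futures::{channel::mpsc, executor::block_on, FutureExt, StreamExt};

fn main() {
	let (tx, rx) = mpsc::unbounded::<u32>();
	tx.unbounded_send(7).unwrap();

	// `into_future()` yields (first_item, rest_of_stream); the tests use the
	// same shape to pull the first commit out of `commits_in` and run its callback.
	let handle_first = rx.into_future().map(|(item, _rest)| match item.unwrap() {
		7 => (),
		_ => panic!("commit expected"),
	});

	block_on(handle_first);
}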
- let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::WriteNotification(_, data) => { - data == encoded_commit - } - _ => false, + let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::WriteNotification(_, data) => data == encoded_commit, + _ => false, + }) }) - }) .map(|_| ()); // Poll both the future sending and handling the commit, as well as the underlying @@ -382,9 +371,12 @@ fn bad_commit_leads_to_report() { let target_hash: Hash = [1; 32].into(); let target_number = 500; - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; + let precommit = + finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; let payload = sp_finality_grandpa::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), ); let mut precommits = Vec::new(); @@ -397,24 +389,21 @@ fn bad_commit_leads_to_report() { auth_data.push((signature, public[i].0.clone())) } - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } + finality_grandpa::CompactCommit { target_hash, target_number, precommits, auth_data } }; let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { round: Round(round), set_id: SetId(set_id), message: commit, - }).encode(); + }) + .encode(); let id = sc_network::PeerId::random(); let global_topic = super::global_topic::(set_id); - let test = make_test_network().0 + let test = make_test_network() + .0 .map(move |tester| { // register a peer. tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); @@ -422,7 +411,8 @@ fn bad_commit_leads_to_report() { }) .then(move |(tester, id)| { // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); + let (commits_in, _) = + tester.net_handle.global_communication(SetId(1), voter_set, false); { let (action, ..) = tester.gossip_validator.do_validate(&id, &encoded_commit[..]); @@ -449,35 +439,35 @@ fn bad_commit_leads_to_report() { }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), - messages: vec![(GRANDPA_PROTOCOL_NAME.into(), commit_to_send.clone().into())], + messages: vec![( + GRANDPA_PROTOCOL_NAME.into(), + commit_to_send.clone().into(), + )], }); true - } + }, _ => false, }); // when the commit comes in, we'll tell the callback it was bad. - let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); - }, - _ => panic!("commit expected"), - } - }); + let handle_commit = commits_in.into_future().map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); + }, + _ => panic!("commit expected"), + }); // once the message is sent and commit is "handled" we should have // a report event coming from the network. 
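Both test scenarios end by composing futures the same way: `future::join` drives message sending and commit handling to completion, then `.then` chains the network-event filter. A minimal stand-in sketch of that combinator shape (the two `ready` futures are placeholders for `send_message` and `handle_commit`):

use futures::{executor::block_on, future, FutureExt};

fn main() {
	let send_message = future::ready("tester");
	let handle_commit = future::ready(());

	// Once both complete, run a follow-up stage that uses the joined output.
	let fut = future::join(send_message, handle_commit)
		.then(|(tester, ())| future::ready(tester.len()))
		.map(|_| ());

	block_on(fut);
}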
- let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::Report(who, cost_benefit) => { - who == id && cost_benefit == super::cost::INVALID_COMMIT - } - _ => false, + let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::Report(who, cost_benefit) => + who == id && cost_benefit == super::cost::INVALID_COMMIT, + _ => false, + }) }) - }) .map(|_| ()); // Poll both the future sending and handling the commit, as well as the underlying @@ -508,7 +498,8 @@ fn peer_with_higher_view_leads_to_catch_up_request() { set_id: SetId(0), round: Round(10), commit_finalized_height: 50, - }).encode(), + }) + .encode(), ); // neighbor packets are always discard @@ -518,27 +509,23 @@ fn peer_with_higher_view_leads_to_catch_up_request() { } // a catch up request should be sent to the peer for round - 1 - tester.filter_network_events(move |event| match event { - Event::WriteNotification(peer, message) => { - assert_eq!( - peer, - id, - ); - - assert_eq!( - message, - gossip::GossipMessage::::CatchUpRequest( - gossip::CatchUpRequestMessage { - set_id: SetId(0), - round: Round(9), - } - ).encode(), - ); + tester + .filter_network_events(move |event| match event { + Event::WriteNotification(peer, message) => { + assert_eq!(peer, id,); + + assert_eq!( + message, + gossip::GossipMessage::::CatchUpRequest( + gossip::CatchUpRequestMessage { set_id: SetId(0), round: Round(9) } + ) + .encode(), + ); - true - }, - _ => false, - }) + true + }, + _ => false, + }) .map(|_| ()) }); diff --git a/substrate/client/finality-grandpa/src/environment.rs b/substrate/client/finality-grandpa/src/environment.rs index 964e199f909687babf65e70e07e182ab13b40a16..c39453b1c8bea3a8b3828d947f4a689c6673aad9 100644 --- a/substrate/client/finality-grandpa/src/environment.rs +++ b/substrate/client/finality-grandpa/src/environment.rs @@ -16,12 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
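Throughout the tests above, raw notification payloads are compared against `expected.encode()`. A small sketch of that SCALE round-trip, assuming `parity-scale-codec` with its `derive` feature enabled; `CatchUpRequestLike` is a made-up mirror of the message shape, not the crate's type:

use parity_scale_codec::{Decode, Encode};

#[derive(Debug, PartialEq, Encode, Decode)]
struct CatchUpRequestLike {
	set_id: u64,
	round: u64,
}

fn main() {
	let msg = CatchUpRequestLike { set_id: 0, round: 9 };
	// Encode to the wire bytes the tests compare against, then decode back.
	let bytes = msg.encode();
	assert_eq!(CatchUpRequestLike::decode(&mut &bytes[..]).unwrap(), msg);
}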
-use std::collections::{BTreeMap, HashMap}; -use std::iter::FromIterator; -use std::marker::PhantomData; -use std::pin::Pin; -use std::sync::Arc; -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + iter::FromIterator, + marker::PhantomData, + pin::Pin, + sync::Arc, + time::Duration, +}; use finality_grandpa::{ round::State as RoundState, voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError, @@ -44,8 +46,10 @@ use sp_finality_grandpa::{ AuthorityId, AuthoritySignature, Equivocation, EquivocationProof, GrandpaApi, RoundNumber, SetId, GRANDPA_ENGINE_ID, }; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, +}; use crate::{ authorities::{AuthoritySet, SharedAuthoritySet}, @@ -105,13 +109,11 @@ impl Encode for CompletedRounds { impl parity_scale_codec::EncodeLike for CompletedRounds {} impl Decode for CompletedRounds { - fn decode(value: &mut I) -> Result { + fn decode( + value: &mut I, + ) -> Result { <(Vec>, SetId, Vec)>::decode(value) - .map(|(rounds, set_id, voters)| CompletedRounds { - rounds, - set_id, - voters, - }) + .map(|(rounds, set_id, voters)| CompletedRounds { rounds, set_id, voters }) } } @@ -121,9 +123,7 @@ impl CompletedRounds { genesis: CompletedRound, set_id: SetId, voters: &AuthoritySet>, - ) - -> CompletedRounds - { + ) -> CompletedRounds { let mut rounds = Vec::with_capacity(NUM_LAST_COMPLETED_ROUNDS); rounds.push(genesis); @@ -137,13 +137,14 @@ impl CompletedRounds { } /// Iterate over all completed rounds. - pub fn iter(&self) -> impl Iterator> { + pub fn iter(&self) -> impl Iterator> { self.rounds.iter().rev() } /// Returns the last (latest) completed round. pub fn last(&self) -> &CompletedRound { - self.rounds.first() + self.rounds + .first() .expect("inner is never empty; always contains at least genesis; qed") } @@ -152,10 +153,11 @@ impl CompletedRounds { pub fn push(&mut self, completed_round: CompletedRound) { use std::cmp::Reverse; - match self.rounds.binary_search_by_key( - &Reverse(completed_round.number), - |completed_round| Reverse(completed_round.number), - ) { + match self + .rounds + .binary_search_by_key(&Reverse(completed_round.number), |completed_round| { + Reverse(completed_round.number) + }) { Ok(idx) => self.rounds[idx] = completed_round, Err(idx) => self.rounds.insert(idx, completed_round), }; @@ -215,37 +217,31 @@ impl VoterSetState { let mut current_rounds = CurrentRounds::new(); current_rounds.insert(1, HasVoted::No); - VoterSetState::Live { - completed_rounds, - current_rounds, - } + VoterSetState::Live { completed_rounds, current_rounds } } /// Returns the last completed rounds. pub(crate) fn completed_rounds(&self) -> CompletedRounds { match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.clone(), + VoterSetState::Live { completed_rounds, .. } => completed_rounds.clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.clone(), } } /// Returns the last completed round. pub(crate) fn last_completed_round(&self) -> CompletedRound { match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.last().clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.last().clone(), + VoterSetState::Live { completed_rounds, .. 
} => completed_rounds.last().clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.last().clone(), } } /// Returns the voter set state validating that it includes the given round /// in current rounds and that the voter isn't paused. - pub fn with_current_round(&self, round: RoundNumber) - -> Result<(&CompletedRounds, &CurrentRounds), Error> - { + pub fn with_current_round( + &self, + round: RoundNumber, + ) -> Result<(&CompletedRounds, &CurrentRounds), Error> { if let VoterSetState::Live { completed_rounds, current_rounds } = self { if current_rounds.contains_key(&round) { Ok((completed_rounds, current_rounds)) @@ -284,10 +280,9 @@ impl HasVoted { /// Returns the proposal we should vote with (if any.) pub fn propose(&self) -> Option<&PrimaryPropose> { match self { - HasVoted::Yes(_, Vote::Propose(propose)) => - Some(propose), - HasVoted::Yes(_, Vote::Prevote(propose, _)) | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => - propose.as_ref(), + HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), + HasVoted::Yes(_, Vote::Prevote(propose, _)) | + HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), _ => None, } } @@ -295,8 +290,8 @@ impl HasVoted { /// Returns the prevote we should vote with (if any.) pub fn prevote(&self) -> Option<&Prevote> { match self { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => - Some(prevote), + HasVoted::Yes(_, Vote::Prevote(_, prevote)) | + HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), _ => None, } } @@ -304,8 +299,7 @@ impl HasVoted { /// Returns the precommit we should vote with (if any.) pub fn precommit(&self) -> Option<&Precommit> { match self { - HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => - Some(precommit), + HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), _ => None, } } @@ -376,21 +370,21 @@ impl SharedVoterSetState { /// Return vote status information for the current round. pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { match &*self.inner.read() { - VoterSetState::Live { current_rounds, .. } => { - current_rounds.get(&round).and_then(|has_voted| match has_voted { - HasVoted::Yes(id, vote) => - Some(HasVoted::Yes(id.clone(), vote.clone())), + VoterSetState::Live { current_rounds, .. } => current_rounds + .get(&round) + .and_then(|has_voted| match has_voted { + HasVoted::Yes(id, vote) => Some(HasVoted::Yes(id.clone(), vote.clone())), _ => None, }) - .unwrap_or(HasVoted::No) - }, + .unwrap_or(HasVoted::No), _ => HasVoted::No, } } // NOTE: not exposed outside of this module intentionally. fn with(&self, f: F) -> R - where F: FnOnce(&mut VoterSetState) -> R + where + F: FnOnce(&mut VoterSetState) -> R, { f(&mut *self.inner.write()) } @@ -452,8 +446,9 @@ impl, SC, VR> Environment(&self, f: F) -> Result<(), Error> where - F: FnOnce(&VoterSetState) -> Result>, Error> + pub(crate) fn update_voter_set_state(&self, f: F) -> Result<(), Error> + where + F: FnOnce(&VoterSetState) -> Result>, Error>, { self.voter_set_state.with(|voter_set_state| { if let Some(set_state) = f(&voter_set_state)? { @@ -461,7 +456,9 @@ impl, SC, VR> Environment { - return Err(Error::Safety( - "Authority set change signalled at genesis.".to_string(), - )) - } + Some((_, n)) if n.is_zero() => + return Err(Error::Safety("Authority set change signalled at genesis.".to_string())), // the next set starts at `n` so the current one lasts until `n - 1`. 
if // `n` is later than the best block, then the current set is still live // at best block. @@ -538,14 +532,15 @@ where // its parent block is the last block in the current set *header.parent_hash() - } + }, // there is no pending change, the latest block for the current set is // the best block. None => best_block_hash, }; // generate key ownership proof at that block - let key_owner_proof = match self.client + let key_owner_proof = match self + .client .runtime_api() .generate_key_ownership_proof( &BlockId::Hash(current_set_latest_hash), @@ -557,15 +552,12 @@ where Some(proof) => proof, None => { debug!(target: "afg", "Equivocation offender is not part of the authority set."); - return Ok(()); - } + return Ok(()) + }, }; // submit equivocation report at **best** block - let equivocation_proof = EquivocationProof::new( - authority_set.set_id, - equivocation, - ); + let equivocation_proof = EquivocationProof::new(authority_set.set_id, equivocation); self.client .runtime_api() @@ -608,7 +600,9 @@ pub(crate) fn ancestry( where Client: HeaderMetadata, { - if base == block { return Err(GrandpaError::NotDescendent) } + if base == block { + return Err(GrandpaError::NotDescendent) + } let tree_route_res = sp_blockchain::tree_route(&**client, block, base); @@ -618,22 +612,17 @@ where debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", block, base, e); - return Err(GrandpaError::NotDescendent); - } + return Err(GrandpaError::NotDescendent) + }, }; if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent); + return Err(GrandpaError::NotDescendent) } // skip one because our ancestry is meant to start from the parent of `block`, // and `tree_route` includes it. - Ok(tree_route - .retracted() - .iter() - .skip(1) - .map(|e| e.hash) - .collect()) + Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } impl voter::Environment> @@ -699,7 +688,7 @@ where // before activating the new set. the `authority_set` is updated immediately thus // we restrict the voter based on that. if set_id != authority_set.set_id() { - return Ok(None); + return Ok(None) } best_chain_containing(block, client, authority_set, select_chain, voting_rule) @@ -718,13 +707,12 @@ where let local_id = local_authority_id(&self.voters, self.config.keystore.as_ref()); let has_voted = match self.voter_set_state.has_voted(round) { - HasVoted::Yes(id, vote) => { + HasVoted::Yes(id, vote) => if local_id.as_ref().map(|k| k == &id).unwrap_or(false) { HasVoted::Yes(id, vote) } else { HasVoted::No - } - }, + }, HasVoted::No => HasVoted::No, }; @@ -756,14 +744,17 @@ where // schedule incoming messages from the network to be held until // corresponding blocks are imported. - let incoming = Box::pin(UntilVoteTargetImported::new( - self.client.import_notification_stream(), - self.network.clone(), - self.client.clone(), - incoming, - "round", - None, - ).map_err(Into::into)); + let incoming = Box::pin( + UntilVoteTargetImported::new( + self.client.import_notification_stream(), + self.network.clone(), + self.client.clone(), + incoming, + "round", + None, + ) + .map_err(Into::into), + ); // schedule network message cleanup when sink drops. 
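The `ancestry` function reshaped above answers: "give me the hashes from `block`'s parent back to, but excluding, `base`, or fail with `NotDescendent`". A simplified sketch of that contract, with a plain parent-hash map standing in for the client's header metadata and tree route:

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct NotDescendent;

fn ancestry(
	parents: &HashMap<u32, u32>,
	base: u32,
	block: u32,
) -> Result<Vec<u32>, NotDescendent> {
	if base == block {
		return Err(NotDescendent)
	}
	let mut route = Vec::new();
	let mut current = block;
	while let Some(&parent) = parents.get(&current) {
		if parent == base {
			// The route starts at `block`'s parent and excludes `base` itself.
			return Ok(route)
		}
		route.push(parent);
		current = parent;
	}
	// Ran out of known ancestors without reaching `base`.
	Err(NotDescendent)
}

fn main() {
	// Chain: 1 <- 2 <- 3 <- 4
	let parents: HashMap<u32, u32> = [(2, 1), (3, 2), (4, 3)].into();
	assert_eq!(ancestry(&parents, 1, 4), Ok(vec![3, 2]));
	assert_eq!(ancestry(&parents, 4, 2), Err(NotDescendent));
}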
let outgoing = Box::pin(outgoing.sink_err_into()); @@ -789,18 +780,20 @@ where self.update_voter_set_state(|voter_set_state| { let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) + let current_round = current_rounds + .get(&round) .expect("checked in with_current_round that key exists; qed."); if !current_round.can_propose() { // we've already proposed in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes(local_id, Vote::Propose(propose)); @@ -849,7 +842,7 @@ where // we've already prevoted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -858,7 +851,8 @@ where let propose = current_round.propose(); let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes(local_id, Vote::Prevote(propose.cloned(), prevote)); @@ -911,7 +905,7 @@ where // we've already precommitted in this round (in a previous run), // ignore the given vote and don't update the voter set // state - return Ok(None); + return Ok(None) } // report to telemetry and prometheus @@ -922,12 +916,13 @@ where HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, _ => { let msg = "Voter precommitting before prevoting."; - return Err(Error::Safety(msg.to_string())); - } + return Err(Error::Safety(msg.to_string())) + }, }; let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) + let current_round = current_rounds + .get_mut(&round) .expect("checked previously that key exists; qed."); *current_round = HasVoted::Yes( @@ -973,7 +968,7 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = completed_rounds.clone(); @@ -998,10 +993,7 @@ where current_rounds.insert(round + 1, HasVoted::No); } - let set_state = VoterSetState::::Live { - completed_rounds, - current_rounds, - }; + let set_state = VoterSetState::::Live { completed_rounds, current_rounds }; crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; @@ -1038,21 +1030,21 @@ where (completed_rounds, current_rounds) } else { let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); + return Err(Error::Safety(msg.to_string())) }; let mut completed_rounds = completed_rounds.clone(); - if let Some(already_completed) = completed_rounds.rounds - .iter_mut().find(|r| r.number == round) + if let Some(already_completed) = + completed_rounds.rounds.iter_mut().find(|r| r.number == round) { let n_existing_votes = already_completed.votes.len(); // the interface of Environment guarantees that the previous `historical_votes` // from `completable` is a prefix of what is passed to `concluded`. 
- already_completed.votes.extend( - historical_votes.seen().iter().skip(n_existing_votes).cloned() - ); + already_completed + .votes + .extend(historical_votes.seen().iter().skip(n_existing_votes).cloned()); already_completed.state = state; crate::aux_schema::write_concluded_round(&*self.client, &already_completed)?; } @@ -1161,8 +1153,8 @@ where block, ); - return Ok(None); - } + return Ok(None) + }, }; // we refuse to vote beyond the current limit number where transitions are scheduled to occur. @@ -1195,7 +1187,7 @@ where } if *target_header.number() == target_number { - break; + break } target_header = client @@ -1230,15 +1222,15 @@ where restricted_number < target_header.number() }) .or_else(|| Some((target_header.hash(), *target_header.number()))) - } + }, Ok(None) => { debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); None - } + }, Err(e) => { debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); None - } + }, }; Ok(result) @@ -1281,20 +1273,22 @@ where status.finalized_number, ); - return Ok(()); + return Ok(()) } // FIXME #1483: clone only when changed let old_authority_set = authority_set.clone(); let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { - let status = authority_set.apply_standard_changes( - hash, - number, - &is_descendent_of::(&*client, None), - initial_sync, - None, - ).map_err(|e| Error::Safety(e.to_string()))?; + let status = authority_set + .apply_standard_changes( + hash, + number, + &is_descendent_of::(&*client, None), + initial_sync, + None, + ) + .map_err(|e| Error::Safety(e.to_string()))?; // send a justification notification if a sender exists and in case of error log it. fn notify_justification( @@ -1327,17 +1321,15 @@ where if !justification_required { if let Some(justification_period) = justification_period { let last_finalized_number = client.info().finalized_number; - justification_required = - (!last_finalized_number.is_zero() || number - last_finalized_number == justification_period) && - (last_finalized_number / justification_period != number / justification_period); + justification_required = (!last_finalized_number.is_zero() || + number - last_finalized_number == justification_period) && + (last_finalized_number / justification_period != + number / justification_period); } } - let justification = GrandpaJustification::from_commit( - &client, - round_number, - commit, - )?; + let justification = + GrandpaJustification::from_commit(&client, round_number, commit)?; (justification_required, justification) }, @@ -1369,25 +1361,22 @@ where "number" => ?number, "hash" => ?hash, ); - crate::aux_schema::update_best_justification( - &justification, - |insert| apply_aux(import_op, insert, &[]), - )?; + crate::aux_schema::update_best_justification(&justification, |insert| { + apply_aux(import_op, insert, &[]) + })?; let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { // the authority set has changed. let (new_id, set_ref) = authority_set.current(); if set_ref.len() > 16 { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Applying GRANDPA set change to new set with {} authorities", set_ref.len(), ); } else { - afg_log!(initial_sync, - "👴 Applying GRANDPA set change to new set {:?}", - set_ref, - ); + afg_log!(initial_sync, "👴 Applying GRANDPA set change to new set {:?}", set_ref,); } telemetry!( @@ -1419,7 +1408,7 @@ where warn!(target: "afg", "Failed to write updated authority set to disk. 
Bailing."); warn!(target: "afg", "Node is in a potentially inconsistent state."); - return Err(e.into()); + return Err(e.into()) } } @@ -1433,6 +1422,6 @@ where *authority_set = old_authority_set; Err(CommandOrError::Error(e)) - } + }, } } diff --git a/substrate/client/finality-grandpa/src/finality_proof.rs b/substrate/client/finality-grandpa/src/finality_proof.rs index ec33d48774ae5735a91ba2df130a9cfa34bfb6c3..56533704af807565688ab663df264223b8825153 100644 --- a/substrate/client/finality-grandpa/src/finality_proof.rs +++ b/substrate/client/finality-grandpa/src/finality_proof.rs @@ -39,19 +39,20 @@ use log::{trace, warn}; use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::backend::Backend; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_runtime::{ generic::BlockId, - traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, }; -use sc_client_api::backend::Backend; use crate::{ - SharedAuthoritySet, best_justification, authorities::{AuthoritySetChangeId, AuthoritySetChanges}, + best_justification, justification::GrandpaJustification, + SharedAuthoritySet, }; const MAX_UNKNOWN_HEADERS: usize = 100_000; @@ -76,10 +77,7 @@ where backend: Arc, shared_authority_set: Option>>, ) -> Self { - FinalityProofProvider { - backend, - shared_authority_set, - } + FinalityProofProvider { backend, shared_authority_set } } /// Create new finality proof provider for the service using: @@ -113,14 +111,10 @@ where { changes } else { - return Ok(None); + return Ok(None) }; - prove_finality( - &*self.backend, - authority_set_changes, - block, - ) + prove_finality(&*self.backend, authority_set_changes, block) } } @@ -166,11 +160,10 @@ where if info.finalized_number < block { let err = format!( "Requested finality proof for descendant of #{} while we only have finalized #{}.", - block, - info.finalized_number, + block, info.finalized_number, ); trace!(target: "afg", "{}", &err); - return Err(FinalityProofError::BlockNotYetFinalized); + return Err(FinalityProofError::BlockNotYetFinalized) } let (justification, just_block) = match authority_set_changes.get_set_id(block) { @@ -185,9 +178,9 @@ where "No justification found for the latest finalized block. 
\ Returning empty proof.", ); - return Ok(None); + return Ok(None) } - } + }, AuthoritySetChangeId::Set(_, last_block_for_set) => { let last_block_for_set_id = BlockId::Number(last_block_for_set); let justification = if let Some(grandpa_justification) = backend @@ -203,10 +196,10 @@ where Returning empty proof.", block, ); - return Ok(None); + return Ok(None) }; (justification, last_block_for_set) - } + }, AuthoritySetChangeId::Unknown => { warn!( target: "afg", @@ -214,8 +207,8 @@ where You need to resync to populate AuthoritySetChanges properly.", block, ); - return Err(FinalityProofError::BlockNotInAuthoritySetChanges); - } + return Err(FinalityProofError::BlockNotInAuthoritySetChanges) + }, }; // Collect all headers from the requested block until the last block of the set @@ -224,7 +217,7 @@ where let mut current = block + One::one(); loop { if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { - break; + break } headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); current += One::one(); @@ -245,9 +238,7 @@ where #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{ - authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId, - }; + use crate::{authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId}; use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider; use sc_client_api::{apply_aux, LockImportRun}; @@ -276,8 +267,9 @@ pub(crate) mod tests { let proof = super::FinalityProof::::decode(&mut &remote_proof[..]) .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; - let justification: GrandpaJustification = Decode::decode(&mut &proof.justification[..]) - .map_err(|_| ClientError::JustificationDecode)?; + let justification: GrandpaJustification = + Decode::decode(&mut &proof.justification[..]) + .map_err(|_| ClientError::JustificationDecode)?; justification.verify(current_set_id, ¤t_authorities)?; Ok(proof) @@ -321,13 +313,13 @@ pub(crate) mod tests { } fn store_best_justification(client: &TestClient, just: &GrandpaJustification) { - client.lock_import_and_run(|import_op| { - crate::aux_schema::update_best_justification( - just, - |insert| apply_aux(import_op, insert, &[]), - ) - }) - .unwrap(); + client + .lock_import_and_run(|import_op| { + crate::aux_schema::update_best_justification(just, |insert| { + apply_aux(import_op, insert, &[]) + }) + }) + .unwrap(); } #[test] @@ -336,11 +328,7 @@ pub(crate) mod tests { let authority_set_changes = AuthoritySetChanges::empty(); // The last finalized block is 4, so we cannot provide further justifications. 
- let proof_of_5 = prove_finality( - &*backend, - authority_set_changes, - 5, - ); + let proof_of_5 = prove_finality(&*backend, authority_set_changes, 5); assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotYetFinalized))); } @@ -353,12 +341,7 @@ pub(crate) mod tests { // Block 4 is finalized without justification // => we can't prove finality of 3 - let proof_of_3 = prove_finality( - &*backend, - authority_set_changes, - 3, - ) - .unwrap(); + let proof_of_3 = prove_finality(&*backend, authority_set_changes, 3).unwrap(); assert_eq!(proof_of_3, None); } @@ -406,14 +389,15 @@ pub(crate) mod tests { 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], finality_proof.encode(), - ).unwrap_err(); + ) + .unwrap_err(); } fn create_commit( block: Block, round: u64, set_id: SetId, - auth: &[Ed25519Keyring] + auth: &[Ed25519Keyring], ) -> finality_grandpa::Commit where Id: From, @@ -481,11 +465,7 @@ pub(crate) mod tests { let mut authority_set_changes = AuthoritySetChanges::empty(); authority_set_changes.append(1, 8); - let proof_of_6 = prove_finality( - &*backend, - authority_set_changes, - 6, - ); + let proof_of_6 = prove_finality(&*backend, authority_set_changes, 6); assert!(matches!(proof_of_6, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); } @@ -499,11 +479,9 @@ pub(crate) mod tests { let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); - client.finalize_block( - BlockId::Number(8), - Some((ID, grandpa_just8.encode().clone())) - ) - .unwrap(); + client + .finalize_block(BlockId::Number(8), Some((ID, grandpa_just8.encode().clone()))) + .unwrap(); // Authority set change at block 8, so the justification stored there will be used in the // FinalityProof for block 6 @@ -512,13 +490,7 @@ pub(crate) mod tests { authority_set_changes.append(1, 8); let proof_of_6: FinalityProof = Decode::decode( - &mut &prove_finality( - &*backend, - authority_set_changes.clone(), - 6, - ) - .unwrap() - .unwrap()[..], + &mut &prove_finality(&*backend, authority_set_changes.clone(), 6).unwrap().unwrap()[..], ) .unwrap(); assert_eq!( @@ -540,10 +512,7 @@ pub(crate) mod tests { let mut authority_set_changes = AuthoritySetChanges::empty(); authority_set_changes.append(0, 5); - assert!(matches!( - prove_finality(&*backend, authority_set_changes, 6), - Ok(None), - )); + assert!(matches!(prove_finality(&*backend, authority_set_changes, 6), Ok(None),)); } #[test] @@ -563,13 +532,7 @@ pub(crate) mod tests { authority_set_changes.append(0, 5); let proof_of_6: FinalityProof = Decode::decode( - &mut &prove_finality( - &*backend, - authority_set_changes, - 6, - ) - .unwrap() - .unwrap()[..], + &mut &prove_finality(&*backend, authority_set_changes, 6).unwrap().unwrap()[..], ) .unwrap(); assert_eq!( diff --git a/substrate/client/finality-grandpa/src/import.rs b/substrate/client/finality-grandpa/src/import.rs index ebb26a28c34855e7c59bf1b78b78b0fdbb1939a9..18e5e2c89d0674fc64a75365b047890810761229 100644 --- a/substrate/client/finality-grandpa/src/import.rs +++ b/substrate/client/finality-grandpa/src/import.rs @@ -31,9 +31,11 @@ use sp_consensus::{ ImportResult, JustificationImport, SelectChain, }; use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}; -use sp_runtime::Justification; +use sp_runtime::{ + generic::{BlockId, 
OpaqueDigestItemId}, + traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}, + Justification, +}; use sp_utils::mpsc::TracingUnboundedSender; use crate::{ @@ -98,12 +100,8 @@ where let chain_info = self.inner.info(); // request justifications for all pending changes for which change blocks have already been imported - let pending_changes: Vec<_> = self - .authority_set - .inner() - .pending_changes() - .cloned() - .collect(); + let pending_changes: Vec<_> = + self.authority_set.inner().pending_changes().cloned().collect(); for pending_change in pending_changes { if pending_change.delay_kind == DelayKind::Finalized && @@ -241,7 +239,7 @@ where ) -> Option>> { // check for forced authority set hard forks if let Some(change) = self.authority_set_hard_forks.get(&hash) { - return Some(change.clone()); + return Some(change.clone()) } // check for forced change. @@ -252,7 +250,7 @@ where canon_height: *header.number(), canon_hash: hash, delay_kind: DelayKind::Best { median_last_finalized }, - }); + }) } // check normal scheduled change. @@ -295,10 +293,9 @@ where fn consume( mut self, ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { - self.old.take().map(|old| ( - old, - self.guard.take().expect("only taken on deconstruction; qed"), - )) + self.old + .take() + .map(|old| (old, self.guard.take().expect("only taken on deconstruction; qed"))) } } @@ -311,20 +308,14 @@ where } let number = *(block.header.number()); - let maybe_change = self.check_new_change( - &block.header, - hash, - ); + let maybe_change = self.check_new_change(&block.header, hash); // returns a function for checking whether a block is a descendent of another // consistent with querying client directly after importing the block. let parent_hash = *block.header.parent_hash(); let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); - let mut guard = InnerGuard { - guard: Some(self.authority_set.inner_locked()), - old: None, - }; + let mut guard = InnerGuard { guard: Some(self.authority_set.inner_locked()), old: None }; // whether to pause the old authority set -- happens after import // of a forced change block. @@ -339,10 +330,10 @@ where do_pause = true; } - guard.as_mut().add_pending_change( - change, - &is_descendent_of, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + guard + .as_mut() + .add_pending_change(change, &is_descendent_of) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; } let applied_changes = { @@ -389,7 +380,9 @@ where AppliedChanges::Forced(new_authorities) } else { - let did_standard = guard.as_mut().enacts_standard_change(hash, number, &is_descendent_of) + let did_standard = guard + .as_mut() + .enacts_standard_change(hash, number, &is_descendent_of) .map_err(|e| ConsensusError::ClientImport(e.to_string())) .map_err(ConsensusError::from)?; @@ -413,19 +406,17 @@ where crate::aux_schema::update_authority_set::( authorities, authorities_change, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, ); } let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); - Ok(PendingSetChanges { - just_in_case, - applied_changes, - do_pause, - }) + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) } } @@ -459,7 +450,7 @@ where // Strip justifications when re-importing an existing block. 
let _justifications = block.justifications.take(); return (&*self.inner).import_block(block, new_cache).await - } + }, Ok(BlockStatus::Unknown) => {}, Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } @@ -483,7 +474,7 @@ where r, ); pending_changes.revert(); - return Ok(r); + return Ok(r) }, Err(e) => { debug!( @@ -492,7 +483,7 @@ where e, ); pending_changes.revert(); - return Err(ConsensusError::ClientImport(e.to_string())); + return Err(ConsensusError::ClientImport(e.to_string())) }, } }; @@ -501,9 +492,9 @@ where // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. if do_pause { - let _ = self.send_voter_commands.unbounded_send( - VoterCommand::Pause("Forced change scheduled after inactivity".to_string()) - ); + let _ = self.send_voter_commands.unbounded_send(VoterCommand::Pause( + "Forced change scheduled after inactivity".to_string(), + )); } let needs_justification = applied_changes.needs_justification(); @@ -521,7 +512,8 @@ where // they should import the block and discard the justification, and they will // then request a justification from sync if it's necessary (which they should // then be able to successfully validate). - let _ = self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); + let _ = + self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); // we must clear all pending justifications requests, presumably they won't be // finalized hence why this forced changes was triggered @@ -537,8 +529,8 @@ where _ => {}, } - let grandpa_justification = justifications - .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); + let grandpa_justification = + justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)); match grandpa_justification { Some(justification) => { @@ -559,7 +551,7 @@ where } }); }, - None => { + None => if needs_justification { debug!( target: "afg", @@ -568,8 +560,7 @@ where ); imported_aux.needs_justification = true; - } - } + }, } Ok(ImportResult::Imported(imported_aux)) @@ -616,14 +607,9 @@ impl GrandpaBlockImport { - afg_log!(initial_sync, + afg_log!( + initial_sync, "👴 Imported justification for block #{} that triggers \ command {}, signaling voter.", number, @@ -703,7 +690,7 @@ where // send the command to the voter let _ = self.send_voter_commands.unbounded_send(command); }, - Err(CommandOrError::Error(e)) => { + Err(CommandOrError::Error(e)) => return Err(match e { Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), Error::Network(error) => ConsensusError::ClientImport(error), @@ -713,10 +700,12 @@ where Error::Signing(error) => ConsensusError::ClientImport(error), Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), - }); - }, + }), Ok(_) => { - assert!(!enacts_change, "returns Ok when no authority set change should be enacted; qed;"); + assert!( + !enacts_change, + "returns Ok when no authority set change should be enacted; qed;" + ); }, } diff --git a/substrate/client/finality-grandpa/src/justification.rs b/substrate/client/finality-grandpa/src/justification.rs index 7805161f06c62f9afc6404653c02f0173af1cf45..d051d0c44e037f38eb988da59e117fa3e0f194de 100644 --- a/substrate/client/finality-grandpa/src/justification.rs +++ b/substrate/client/finality-grandpa/src/justification.rs @@ -16,8 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
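In the import path above, `justifications.and_then(|just| just.into_justification(GRANDPA_ENGINE_ID))` selects the GRANDPA justification out of a per-engine collection. A simplified stand-in (not sp-runtime's actual type) showing the lookup; `FRNK` is the engine id GRANDPA registers under:

type ConsensusEngineId = [u8; 4];
const GRANDPA_ENGINE_ID: ConsensusEngineId = *b"FRNK";

struct Justifications(Vec<(ConsensusEngineId, Vec<u8>)>);

impl Justifications {
	// Return the encoded justification stored for the given engine, if any.
	fn into_justification(self, engine_id: ConsensusEngineId) -> Option<Vec<u8>> {
		self.0.into_iter().find(|(id, _)| *id == engine_id).map(|(_, j)| j)
	}
}

fn main() {
	let justs = Justifications(vec![(GRANDPA_ENGINE_ID, vec![1, 2, 3])]);
	assert_eq!(justs.into_justification(GRANDPA_ENGINE_ID), Some(vec![1, 2, 3]));
}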
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; use parity_scale_codec::{Decode, Encode}; @@ -52,7 +54,8 @@ impl GrandpaJustification { client: &Arc, round: u64, commit: Commit, - ) -> Result, Error> where + ) -> Result, Error> + where C: HeaderBackend, { let mut votes_ancestries_hashes = HashSet::new(); @@ -66,12 +69,14 @@ impl GrandpaJustification { for signed in commit.precommits.iter() { let mut current_hash = signed.precommit.target_hash; loop { - if current_hash == commit.target_hash { break; } + if current_hash == commit.target_hash { + break + } match client.header(BlockId::Hash(current_hash))? { Some(current_header) => { if *current_header.number() <= commit.target_number { - return error(); + return error() } let parent_hash = *current_header.parent_hash(); @@ -95,20 +100,20 @@ impl GrandpaJustification { finalized_target: (Block::Hash, NumberFor), set_id: u64, voters: &VoterSet, - ) -> Result, ClientError> where + ) -> Result, ClientError> + where NumberFor: finality_grandpa::BlockNumberOps, { - let justification = GrandpaJustification::::decode(&mut &*encoded) .map_err(|_| ClientError::JustificationDecode)?; - if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { + if (justification.commit.target_hash, justification.commit.target_number) != + finalized_target + { let msg = "invalid commit target in grandpa justification".to_string(); Err(ClientError::BadJustification(msg)) } else { - justification - .verify_with_voter_set(set_id, voters) - .map(|_| justification) + justification.verify_with_voter_set(set_id, voters).map(|_| justification) } } @@ -117,9 +122,8 @@ impl GrandpaJustification { where NumberFor: finality_grandpa::BlockNumberOps, { - let voters = VoterSet::new(authorities.iter().cloned()).ok_or(ClientError::Consensus( - sp_consensus::Error::InvalidAuthoritiesSet, - ))?; + let voters = VoterSet::new(authorities.iter().cloned()) + .ok_or(ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet))?; self.verify_with_voter_set(set_id, &voters) } @@ -137,16 +141,12 @@ impl GrandpaJustification { let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); - match finality_grandpa::validate_commit( - &self.commit, - voters, - &ancestry_chain, - ) { + match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { Ok(ref result) if result.ghost().is_some() => {}, _ => { let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientError::BadJustification(msg)); - } + return Err(ClientError::BadJustification(msg)) + }, } let mut buf = Vec::new(); @@ -161,11 +161,12 @@ impl GrandpaJustification { &mut buf, ) { return Err(ClientError::BadJustification( - "invalid signature for precommit in grandpa justification".to_string())); + "invalid signature for precommit in grandpa justification".to_string(), + )) } if self.commit.target_hash == signed.precommit.target_hash { - continue; + continue } match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { @@ -176,21 +177,21 @@ impl GrandpaJustification { visited_hashes.insert(hash); } }, - _ => { + _ => return Err(ClientError::BadJustification( - "invalid precommit ancestry proof in grandpa justification".to_string())); - }, + "invalid precommit ancestry proof in grandpa justification".to_string(), + )), } } - let ancestry_hashes = self.votes_ancestries - .iter() - 
.map(|h: &Block::Header| h.hash()) - .collect(); + let ancestry_hashes = + self.votes_ancestries.iter().map(|h: &Block::Header| h.hash()).collect(); if visited_hashes != ancestry_hashes { return Err(ClientError::BadJustification( - "invalid precommit ancestries in grandpa justification with unused headers".to_string())); + "invalid precommit ancestries in grandpa justification with unused headers" + .to_string(), + )) } Ok(()) @@ -211,24 +212,28 @@ struct AncestryChain { impl AncestryChain { fn new(ancestry: &[Block::Header]) -> AncestryChain { - let ancestry: HashMap<_, _> = ancestry - .iter() - .cloned() - .map(|h: Block::Header| (h.hash(), h)) - .collect(); + let ancestry: HashMap<_, _> = + ancestry.iter().cloned().map(|h: Block::Header| (h.hash(), h)).collect(); AncestryChain { ancestry } } } -impl finality_grandpa::Chain> for AncestryChain where - NumberFor: finality_grandpa::BlockNumberOps +impl finality_grandpa::Chain> for AncestryChain +where + NumberFor: finality_grandpa::BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { let mut route = Vec::new(); let mut current_hash = block; loop { - if current_hash == base { break; } + if current_hash == base { + break + } match self.ancestry.get(¤t_hash) { Some(current_header) => { current_hash = *current_header.parent_hash(); diff --git a/substrate/client/finality-grandpa/src/lib.rs b/substrate/client/finality-grandpa/src/lib.rs index 6c3f0f6af37a868a73dca55eff773fbafbe66ab1..58e7ba1493e8aaf8bd9bdbd068859ea1d69b77e1 100644 --- a/substrate/client/finality-grandpa/src/lib.rs +++ b/substrate/client/finality-grandpa/src/lib.rs @@ -56,41 +56,39 @@ #![warn(missing_docs)] -use futures::{ - prelude::*, - StreamExt, -}; +use futures::{prelude::*, StreamExt}; use log::{debug, error, info}; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::RwLock; +use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ backend::{AuxStore, Backend}, - LockImportRun, BlockchainEvents, CallExecutor, - ExecutionStrategy, Finalizer, TransactionFor, ExecutorProvider, + BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, + TransactionFor, }; -use parity_scale_codec::{Decode, Encode}; -use prometheus_endpoint::{PrometheusError, Registry}; +use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_api::ProvideRuntimeApi; -use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; -use sp_consensus::{SelectChain, BlockImport}; -use sp_core::{ - crypto::Public, -}; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_application_crypto::AppKey; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{BlockImport, SelectChain}; +use sp_core::crypto::Public; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, NumberFor, Zero}, +}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; -use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO, CONSENSUS_DEBUG}; -use parking_lot::RwLock; -use finality_grandpa::Error as GrandpaError; -use finality_grandpa::{voter, voter_set::VoterSet}; pub use finality_grandpa::BlockNumberOps; - -use std::{fmt, io}; -use 
std::sync::Arc; -use std::time::Duration; -use std::pin::Pin; -use std::task::{Poll, Context}; +use finality_grandpa::{voter, voter_set::VoterSet, Error as GrandpaError}; + +use std::{ + fmt, io, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; // utility logging macro that takes as first argument a conditional to // decide whether to log under debug or info level (useful to restrict @@ -123,6 +121,7 @@ mod voting_rule; pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; pub use aux_schema::best_justification; +pub use finality_grandpa::voter::report; pub use finality_proof::{FinalityProof, FinalityProofError, FinalityProofProvider}; pub use import::{find_forced_change, find_scheduled_change, GrandpaBlockImport}; pub use justification::GrandpaJustification; @@ -132,13 +131,12 @@ pub use voting_rule::{ BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRuleResult, VotingRulesBuilder, }; -pub use finality_grandpa::voter::report; use aux_schema::PersistentData; use communication::{Network as NetworkT, NetworkBridge}; use environment::{Environment, VoterSetState}; -use until_imported::UntilGlobalMessageBlocksImported; use sp_finality_grandpa::{AuthorityList, AuthoritySignature, SetId}; +use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. pub use sp_finality_grandpa::{AuthorityId, AuthorityPair, GrandpaApi, ScheduledChange}; @@ -159,7 +157,8 @@ pub type SignedMessage = finality_grandpa::SignedMessage< >; /// A primary propose message for this chain's block type. -pub type PrimaryPropose = finality_grandpa::PrimaryPropose<::Hash, NumberFor>; +pub type PrimaryPropose = + finality_grandpa::PrimaryPropose<::Hash, NumberFor>; /// A prevote message for this chain's block type. pub type Prevote = finality_grandpa::Prevote<::Hash, NumberFor>; /// A precommit message for this chain's block type. @@ -198,22 +197,14 @@ type CommunicationIn = finality_grandpa::voter::CommunicationIn< /// Global communication input stream for commits and catch up messages, with /// the hash type not being derived from the block, useful for forcing the hash /// to some type (e.g. `H256`) when the compiler can't do the inference. -type CommunicationInH = finality_grandpa::voter::CommunicationIn< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationInH = + finality_grandpa::voter::CommunicationIn, AuthoritySignature, AuthorityId>; /// Global communication sink for commits with the hash type not being derived /// from the block, useful for forcing the hash to some type (e.g. `H256`) when /// the compiler can't do the inference. -type CommunicationOutH = finality_grandpa::voter::CommunicationOut< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationOutH = + finality_grandpa::voter::CommunicationOut, AuthoritySignature, AuthorityId>; /// Shared voter state for querying. pub struct SharedVoterState { @@ -223,18 +214,14 @@ pub struct SharedVoterState { impl SharedVoterState { /// Create a new empty `SharedVoterState` instance. 
pub fn empty() -> Self { - Self { - inner: Arc::new(RwLock::new(None)), - } + Self { inner: Arc::new(RwLock::new(None)) } } fn reset( &self, voter_state: Box + Sync + Send>, ) -> Option<()> { - let mut shared_voter_state = self - .inner - .try_write_for(Duration::from_secs(1))?; + let mut shared_voter_state = self.inner.try_write_for(Duration::from_secs(1))?; *shared_voter_state = Some(voter_state); Some(()) @@ -323,7 +310,8 @@ pub(crate) trait BlockStatus { fn block_number(&self, hash: Block::Hash) -> Result>, Error>; } -impl BlockStatus for Arc where +impl BlockStatus for Arc +where Client: HeaderBackend, NumberFor: BlockNumberOps, { @@ -337,24 +325,36 @@ impl BlockStatus for Arc where /// Ideally this would be a trait alias, we're not there yet. /// tracking issue pub trait ClientForGrandpa: - LockImportRun + Finalizer + AuxStore - + HeaderMetadata + HeaderBackend - + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider + LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + BlockImport, Error = sp_consensus::Error> - where - BE: Backend, - Block: BlockT, -{} +where + BE: Backend, + Block: BlockT, +{ +} impl ClientForGrandpa for T - where - BE: Backend, - Block: BlockT, - T: LockImportRun + Finalizer + AuxStore - + HeaderMetadata + HeaderBackend - + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error>, -{} +where + BE: Backend, + Block: BlockT, + T: LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + + BlockImport, Error = sp_consensus::Error>, +{ +} /// Something that one can ask to do a block sync request. pub(crate) trait BlockSyncRequester { @@ -364,14 +364,25 @@ pub(crate) trait BlockSyncRequester { /// If the given vector of peers is empty then the underlying implementation /// should make a best effort to fetch the block from any peers it is /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl BlockSyncRequester for NetworkBridge where +impl BlockSyncRequester for NetworkBridge +where Block: BlockT, Network: NetworkT, { - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor) { + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ) { NetworkBridge::set_sync_fork_request(self, peers, hash, number) } } @@ -391,7 +402,7 @@ pub(crate) enum VoterCommand { /// Pause the voter for given reason. Pause(String), /// New authorities. 
- ChangeAuthorities(NewAuthoritySet) + ChangeAuthorities(NewAuthoritySet), } impl fmt::Display for VoterCommand { @@ -436,7 +447,7 @@ impl From> for CommandOrError { } } -impl ::std::error::Error for CommandOrError { } +impl ::std::error::Error for CommandOrError {} impl fmt::Display for CommandOrError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -476,8 +487,10 @@ pub trait GenesisAuthoritySetProvider { fn get(&self) -> Result; } -impl GenesisAuthoritySetProvider for Arc> - where E: CallExecutor, +impl GenesisAuthoritySetProvider + for Arc> +where + E: CallExecutor, { fn get(&self) -> Result { // This implementation uses the Grandpa runtime API instead of reading directly from the @@ -492,10 +505,12 @@ impl GenesisAuthoritySetProvider for Arc( genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, telemetry: Option, -) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, -> +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> where SC: SelectChain, BE: Backend + 'static, @@ -539,13 +548,7 @@ pub fn block_import_with_authority_set_hard_forks select_chain: SC, authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, telemetry: Option, -) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, -> +) -> Result<(GrandpaBlockImport, LinkHalf), ClientError> where SC: SelectChain, BE: Backend + 'static, @@ -554,11 +557,8 @@ where let chain_info = client.info(); let genesis_hash = chain_info.genesis_hash; - let persistent_data = aux_schema::load_persistent( - &*client, - genesis_hash, - >::zero(), - { + let persistent_data = + aux_schema::load_persistent(&*client, genesis_hash, >::zero(), { let telemetry = telemetry.clone(); move || { let authorities = genesis_authorities_provider.get()?; @@ -570,13 +570,11 @@ where ); Ok(authorities) } - }, - )?; + })?; let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); - let (justification_sender, justification_stream) = - GrandpaJustificationStream::channel(); + let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); // create pending change objects with 0 delay and enacted on finality // (i.e. standard changes) for each authority set hard fork. @@ -646,11 +644,8 @@ where let is_voter = local_authority_id(voters, keystore).is_some(); // verification stream - let (global_in, global_out) = network.global_communication( - communication::SetId(set_id), - voters.clone(), - is_voter, - ); + let (global_in, global_out) = + network.global_communication(communication::SetId(set_id), voters.clone(), is_voter); // block commit and catch up messages until relevant blocks are imported. 
    let global_in = UntilGlobalMessageBlocksImported::new(
@@ -758,23 +753,18 @@ where
    );
    let conf = config.clone();
-   let telemetry_task = if let Some(telemetry_on_connect) = telemetry
-       .as_ref()
-       .map(|x| x.on_connect_stream())
-   {
-       let authorities = persistent_data.authority_set.clone();
-       let telemetry = telemetry.clone();
-       let events = telemetry_on_connect
-           .for_each(move |_| {
+   let telemetry_task =
+       if let Some(telemetry_on_connect) = telemetry.as_ref().map(|x| x.on_connect_stream()) {
+           let authorities = persistent_data.authority_set.clone();
+           let telemetry = telemetry.clone();
+           let events = telemetry_on_connect.for_each(move |_| {
                let current_authorities = authorities.current_authorities();
                let set_id = authorities.set_id();
                let authority_id =
                    local_authority_id(&current_authorities, conf.keystore.as_ref())
                        .unwrap_or_default();
-               let authorities = current_authorities
-                   .iter()
-                   .map(|(id, _)| id.to_string())
-                   .collect::<Vec<_>>();
+               let authorities =
+                   current_authorities.iter().map(|(id, _)| id.to_string()).collect::<Vec<_>>();

                let authorities = serde_json::to_string(&authorities).expect(
                    "authorities is always at least an empty vector; \
@@ -792,10 +782,10 @@ where
                    future::ready(())
                });

-           future::Either::Left(events)
-       } else {
-           future::Either::Right(future::pending())
-       };
+           future::Either::Left(events)
+       } else {
+           future::Either::Right(future::pending())
+       };

    let voter_work = VoterWork::new(
        client,
@@ -819,8 +809,7 @@ where
    });

    // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa.
-   let telemetry_task = telemetry_task
-       .then(|_| future::pending::<()>());
+   let telemetry_task = telemetry_task.then(|_| future::pending::<()>());

    Ok(future::select(voter_work, telemetry_task).map(drop))
}
@@ -842,7 +831,9 @@ impl Metrics {
/// Future that powers the voter.
#[must_use]
struct VoterWork<B, Block: BlockT, C, N: NetworkT<Block>, SC, VR> {
-   voter: Pin<Box<dyn Future<Output = Result<(), CommandOrError<Block::Hash, NumberFor<Block>>>> + Send>>,
+   voter: Pin<
+       Box<dyn Future<Output = Result<(), CommandOrError<Block::Hash, NumberFor<Block>>>> + Send>,
+   >,
    shared_voter_state: SharedVoterState,
    env: Arc<Environment<B, Block, C, N, SC, VR>>,
    voter_commands_rx: TracingUnboundedReceiver<VoterCommand<Block::Hash, NumberFor<Block>>>,
@@ -881,7 +872,7 @@ where
            Some(Err(e)) => {
                debug!(target: "afg", "Failed to register metrics: {:?}", e);
                None
-           }
+           },
            None => None,
        };
@@ -937,12 +928,7 @@ where
        let chain_info = self.env.client.info();

-       let authorities = self
-           .env
-           .voters
-           .iter()
-           .map(|(id, _)| id.to_string())
-           .collect::<Vec<_>>();
+       let authorities = self.env.voters.iter().map(|(id, _)| id.to_string()).collect::<Vec<_>>();

        let authorities = serde_json::to_string(&authorities).expect(
            "authorities is always at least an empty vector; elements are always of type string; qed.",
        );
@@ -961,10 +947,7 @@ where
        match &*self.env.voter_set_state.read() {
            VoterSetState::Live { completed_rounds, .. } => {
-               let last_finalized = (
-                   chain_info.finalized_hash,
-                   chain_info.finalized_number,
-               );
+               let last_finalized = (chain_info.finalized_hash, chain_info.finalized_number);

                let global_comms = global_communication(
                    self.env.set_id,
@@ -997,20 +980,18 @@ where
                self.voter = Box::pin(voter);
            },
-           VoterSetState::Paused { .. } =>
-               self.voter = Box::pin(future::pending()),
+           VoterSetState::Paused { ..
} => self.voter = Box::pin(future::pending()), }; } fn handle_voter_command( &mut self, - command: VoterCommand> + command: VoterCommand>, ) -> Result<(), Error> { match command { VoterCommand::ChangeAuthorities(new) => { - let voters: Vec = new.authorities.iter().map(move |(a, _)| { - format!("{}", a) - }).collect(); + let voters: Vec = + new.authorities.iter().map(move |(a, _)| format!("{}", a)).collect(); telemetry!( self.telemetry; CONSENSUS_INFO; @@ -1034,14 +1015,12 @@ where Ok(Some(set_state)) })?; - let voters = Arc::new(VoterSet::new(new.authorities.into_iter()) - .expect( - "new authorities come from pending change; \ + let voters = Arc::new(VoterSet::new(new.authorities.into_iter()).expect( + "new authorities come from pending change; \ pending change comes from `AuthoritySet`; \ `AuthoritySet` validates authorities is non-empty and weights are non-zero; \ - qed." - ) - ); + qed.", + )); self.env = Arc::new(Environment { voters, @@ -1061,7 +1040,7 @@ where self.rebuild_voter(); Ok(()) - } + }, VoterCommand::Pause(reason) => { info!(target: "afg", "Pausing old validator set: {}", reason); @@ -1076,7 +1055,7 @@ where self.rebuild_voter(); Ok(()) - } + }, } } } @@ -1096,37 +1075,35 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match Future::poll(Pin::new(&mut self.voter), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(Ok(())) => { // voters don't conclude naturally - return Poll::Ready( - Err(Error::Safety("finality-grandpa inner voter has concluded.".into())) - ) - } + return Poll::Ready(Err(Error::Safety( + "finality-grandpa inner voter has concluded.".into(), + ))) + }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error return Poll::Ready(Err(e)) - } + }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready( - Err(Error::Safety("`voter_commands_rx` was closed.".into())) - ) - } + return Poll::Ready(Err(Error::Safety("`voter_commands_rx` was closed.".into()))) + }, Poll::Ready(Some(command)) => { // some command issued externally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } Future::poll(Pin::new(&mut self.network), cx) @@ -1142,10 +1119,10 @@ fn local_authority_id( ) -> Option { keystore.and_then(|keystore| { voters - .iter() - .find(|(p, _)| { - SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) - }) - .map(|(p, _)| p.clone()) + .iter() + .find(|(p, _)| { + SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) + }) + .map(|(p, _)| p.clone()) }) } diff --git a/substrate/client/finality-grandpa/src/notification.rs b/substrate/client/finality-grandpa/src/notification.rs index b545f0d8a637e8948939e02e2c2d8003af624524..f0b0b1669dc9538157c5c5c2e32fa6bdb4a63abb 100644 --- a/substrate/client/finality-grandpa/src/notification.rs +++ b/substrate/client/finality-grandpa/src/notification.rs @@ -16,14 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::sync::Arc; use parking_lot::Mutex; +use std::sync::Arc; use sp_runtime::traits::Block as BlockT; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use crate::justification::GrandpaJustification; -use crate::Error; +use crate::{justification::GrandpaJustification, Error}; // Stream of justifications returned when subscribing. type JustificationStream = TracingUnboundedReceiver>; @@ -41,16 +40,14 @@ type SharedJustificationSenders = Arc { - subscribers: SharedJustificationSenders + subscribers: SharedJustificationSenders, } impl GrandpaJustificationSender { /// The `subscribers` should be shared with a corresponding /// `GrandpaJustificationStream`. fn new(subscribers: SharedJustificationSenders) -> Self { - Self { - subscribers, - } + Self { subscribers } } /// Send out a notification to all subscribers that a new justification @@ -83,7 +80,7 @@ impl GrandpaJustificationSender { /// so it can be used to add more subscriptions. #[derive(Clone)] pub struct GrandpaJustificationStream { - subscribers: SharedJustificationSenders + subscribers: SharedJustificationSenders, } impl GrandpaJustificationStream { @@ -100,9 +97,7 @@ impl GrandpaJustificationStream { /// The `subscribers` should be shared with a corresponding /// `GrandpaJustificationSender`. fn new(subscribers: SharedJustificationSenders) -> Self { - Self { - subscribers, - } + Self { subscribers } } /// Subscribe to a channel through which justifications are sent diff --git a/substrate/client/finality-grandpa/src/observer.rs b/substrate/client/finality-grandpa/src/observer.rs index 23c4f873a10b7bcc4e49643e497654c9ba62a8f3..cbea6c138c90f9d3202907c010d269d73cdc0a6e 100644 --- a/substrate/client/finality-grandpa/src/observer.rs +++ b/substrate/client/finality-grandpa/src/observer.rs @@ -16,10 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::marker::{PhantomData, Unpin}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; +use std::{ + marker::{PhantomData, Unpin}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError}; use futures::prelude::*; @@ -95,14 +97,14 @@ where }, voter::CommunicationIn::CatchUp(..) => { // ignore catch up messages - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) }, }; // if the commit we've received targets a block lower or equal to the last // finalized, ignore it and continue with the current state if commit.target_number <= last_finalized_number { - return future::ok(last_finalized_number); + return future::ok(last_finalized_number) } let validation_result = match finality_grandpa::validate_commit( @@ -201,11 +203,9 @@ where telemetry.clone(), ); - let observer_work = observer_work - .map_ok(|_| ()) - .map_err(|e| { - warn!("GRANDPA Observer failed: {:?}", e); - }); + let observer_work = observer_work.map_ok(|_| ()).map_err(|e| { + warn!("GRANDPA Observer failed: {:?}", e); + }); Ok(observer_work.map(drop)) } @@ -213,7 +213,8 @@ where /// Future that powers the observer. 
#[must_use] struct ObserverWork> { - observer: Pin>>> + Send>>, + observer: + Pin>>> + Send>>, client: Arc, network: NetworkBridge, persistent_data: PersistentData, @@ -285,11 +286,13 @@ where let network = self.network.clone(); let voters = voters.clone(); - move |round| network.note_round( - crate::communication::Round(round), - crate::communication::SetId(set_id), - &*voters, - ) + move |round| { + network.note_round( + crate::communication::Round(round), + crate::communication::SetId(set_id), + &*voters, + ) + } }; // create observer for the current set @@ -337,7 +340,8 @@ where set_state }, - }.into(); + } + .into(); self.rebuild_observer(); Ok(()) @@ -356,33 +360,33 @@ where fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { match Future::poll(Pin::new(&mut self.observer), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(Ok(())) => { // observer commit stream doesn't conclude naturally; this could reasonably be an error. return Poll::Ready(Ok(())) - } + }, Poll::Ready(Err(CommandOrError::Error(e))) => { // return inner observer error return Poll::Ready(Err(e)) - } + }, Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { // some command issued internally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(None) => { // the `voter_commands_rx` stream should never conclude since it's never closed. return Poll::Ready(Ok(())) - } + }, Poll::Ready(Some(command)) => { // some command issued externally self.handle_voter_command(command)?; cx.waker().wake_by_ref(); - } + }, } Future::poll(Pin::new(&mut self.network), cx) @@ -393,12 +397,15 @@ where mod tests { use super::*; + use crate::{ + aux_schema, + communication::tests::{make_test_network, Event}, + }; use assert_matches::assert_matches; - use sp_utils::mpsc::tracing_unbounded; - use crate::{aux_schema, communication::tests::{Event, make_test_network}}; - use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use sc_network::PeerId; use sp_blockchain::HeaderBackend as _; + use sp_utils::mpsc::tracing_unbounded; + use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use futures::executor; @@ -426,12 +433,9 @@ mod tests { let voters = vec![(sp_keyring::Ed25519Keyring::Alice.public().into(), 1)]; - let persistent_data = aux_schema::load_persistent( - &*backend, - client.info().genesis_hash, - 0, - || Ok(voters), - ).unwrap(); + let persistent_data = + aux_schema::load_persistent(&*backend, client.info().genesis_hash, 0, || Ok(voters)) + .unwrap(); let (_tx, voter_command_rx) = tracing_unbounded(""); diff --git a/substrate/client/finality-grandpa/src/tests.rs b/substrate/client/finality-grandpa/src/tests.rs index 725beec6a94b2449cdb39d7c5cdf1b65bfb9755d..6243b1752c7c1e1454a97f3780838fa36e1ffb4c 100644 --- a/substrate/client/finality-grandpa/src/tests.rs +++ b/substrate/client/finality-grandpa/src/tests.rs @@ -21,31 +21,37 @@ use super::*; use assert_matches::assert_matches; use environment::HasVoted; +use futures::executor::block_on; +use futures_timer::Delay; +use parking_lot::{Mutex, RwLock}; +use sc_network::config::{ProtocolConfig, Role}; use sc_network_test::{ - Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, - TestClient, TestNetFactory, FullPeerConfig, + Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, + PeersFullClient, 
TestClient, TestNetFactory, }; -use sc_network::config::{ProtocolConfig, Role}; -use parking_lot::{RwLock, Mutex}; -use futures_timer::Delay; -use futures::executor::block_on; -use tokio::runtime::{Runtime, Handle}; -use sp_keyring::Ed25519Keyring; -use sp_blockchain::Result; use sp_api::{ApiRef, ProvideRuntimeApi}; -use substrate_test_runtime_client::runtime::BlockNumber; +use sp_blockchain::Result; use sp_consensus::{ - BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::BoxJustificationImport, + import_queue::BoxJustificationImport, BlockImport, BlockImportParams, BlockOrigin, + ForkChoiceStrategy, ImportResult, ImportedAux, }; -use std::{collections::{HashMap, HashSet}, pin::Pin}; -use sp_runtime::{Justifications, traits::{Block as BlockT, Header as HeaderT}}; -use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::H256; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_finality_grandpa::{ - GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, + AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, +}; +use sp_keyring::Ed25519Keyring; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{ + generic::{BlockId, DigestItem}, + traits::{Block as BlockT, Header as HeaderT}, + Justifications, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, }; +use substrate_test_runtime_client::runtime::BlockNumber; +use tokio::runtime::{Handle, Runtime}; use authorities::AuthoritySet; use sc_block_builder::BlockBuilderProvider; @@ -61,7 +67,7 @@ type GrandpaBlockImport = crate::GrandpaBlockImport< substrate_test_runtime_client::Backend, Block, PeersFullClient, - LongestChain + LongestChain, >; struct GrandpaTestNet { @@ -71,10 +77,8 @@ struct GrandpaTestNet { impl GrandpaTestNet { fn new(test_config: TestApi, n_authority: usize, n_full: usize) -> Self { - let mut net = GrandpaTestNet { - peers: Vec::with_capacity(n_authority + n_full), - test_config, - }; + let mut net = + GrandpaTestNet { peers: Vec::with_capacity(n_authority + n_full), test_config }; for _ in 0..n_authority { net.add_authority_peer(); @@ -105,10 +109,7 @@ impl TestNetFactory for GrandpaTestNet { /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { - peers: Vec::new(), - test_config: Default::default(), - } + GrandpaTestNet { peers: Vec::new(), test_config: Default::default() } } fn default_config() -> ProtocolConfig { @@ -133,13 +134,10 @@ impl TestNetFactory for GrandpaTestNet { PassThroughVerifier::new(false) // use non-instant finality. 
} - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - PeerData, - ) - { + fn make_block_import( + &self, + client: PeersClient, + ) -> (BlockImportAdapter, Option>, PeerData) { match client { PeersClient::Full(ref client, ref backend) => { let (import, link) = block_import( @@ -147,7 +145,8 @@ impl TestNetFactory for GrandpaTestNet { &self.test_config, LongestChain::new(backend.clone()), None, - ).expect("Could not create block import for fresh peer."); + ) + .expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); ( BlockImportAdapter::new(import), @@ -181,9 +180,7 @@ pub(crate) struct TestApi { impl TestApi { pub fn new(genesis_authorities: AuthorityList) -> Self { - TestApi { - genesis_authorities, - } + TestApi { genesis_authorities } } } @@ -235,21 +232,24 @@ fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { fn create_keystore(authority: Ed25519Keyring) -> (SyncCryptoStorePtr, tempfile::TempDir) { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore = + Arc::new(LocalKeystore::open(keystore_path.path(), None).expect("Creates keystore")); SyncCryptoStore::ed25519_generate_new(&*keystore, GRANDPA, Some(&authority.to_seed())) .expect("Creates authority key"); (keystore, keystore_path) } -fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { +fn block_until_complete( + future: impl Future + Unpin, + net: &Arc>, + runtime: &mut Runtime, +) { let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending + net.lock().poll(cx); + Poll::<()>::Pending }); - runtime.block_on( - future::select(future, drive_to_completion) - ); + runtime.block_on(future::select(future, drive_to_completion)); } // Spawns grandpa voters. Returns a future to spawn on the runtime. 
@@ -264,11 +264,9 @@ fn initialize_grandpa( let (net_service, link) = { // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].network_service().clone(), - link, - ) + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].network_service().clone(), link) }; let grandpa_params = GrandpaParams { @@ -288,9 +286,10 @@ fn initialize_grandpa( shared_voter_state: SharedVoterState::empty(), telemetry: None, }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + let voter = + run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - fn assert_send(_: &T) { } + fn assert_send(_: &T) {} assert_send(&voter); voters.push(voter); @@ -307,8 +306,9 @@ fn run_to_completion_with( net: Arc>, peers: &[Ed25519Keyring], with: F, -) -> u64 where - F: FnOnce(Handle) -> Option>>> +) -> u64 +where + F: FnOnce(Handle) -> Option>>>, { let mut wait_for = Vec::new(); @@ -322,20 +322,19 @@ fn run_to_completion_with( let highest_finalized = highest_finalized.clone(); let client = net.lock().peers[peer_id].client().clone(); - wait_for.push( - Box::pin( - client.finality_notification_stream() - .take_while(move |n| { - let mut highest_finalized = highest_finalized.write(); - if *n.header.number() > *highest_finalized { - *highest_finalized = *n.header.number(); - } - future::ready(n.header.number() < &blocks) - }) - .collect::>() - .map(|_| ()) - ) - ); + wait_for.push(Box::pin( + client + .finality_notification_stream() + .take_while(move |n| { + let mut highest_finalized = highest_finalized.write(); + if *n.header.number() > *highest_finalized { + *highest_finalized = *n.header.number(); + } + future::ready(n.header.number() < &blocks) + }) + .collect::>() + .map(|_| ()), + )); } // wait for all finalized on each. 
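The `block_until_complete` helper reformatted above races the future under test against a `poll_fn` that pumps the network and never resolves, so the test finishes exactly when the interesting future does. A standalone sketch of that pattern, assuming only the futures crate:

    use futures::{executor::block_on, future};
    use std::task::Poll;

    fn main() {
        let mut pumps = 0u32;
        let mut polls = 0u32;

        // Stand-in for the condition under test ("all peers finalized block N"):
        // resolves after a few polls.
        let work = future::poll_fn(move |cx| {
            polls += 1;
            if polls >= 3 {
                Poll::Ready(())
            } else {
                cx.waker().wake_by_ref(); // ask to be polled again
                Poll::Pending
            }
        });

        // Stand-in for `net.lock().poll(cx)`: pump background state, never finish.
        let drive = future::poll_fn(|_cx| {
            pumps += 1;
            Poll::<()>::Pending
        });

        block_on(future::select(work, drive));
        assert!(pumps >= 1, "driver was polled alongside the main future");
    }

`select` polls both sides on every wakeup, which is what keeps the fake network making progress while the assertion future waits.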
@@ -350,7 +349,7 @@ fn run_to_completion( runtime: &mut Runtime, blocks: u64, net: Arc>, - peers: &[Ed25519Keyring] + peers: &[Ed25519Keyring], ) -> u64 { run_to_completion_with(runtime, blocks, net, peers, |_| None) } @@ -386,8 +385,7 @@ fn finalize_3_voters_no_observers() { net.block_until_sync(); for i in 0..3 { - assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -395,7 +393,12 @@ fn finalize_3_voters_no_observers() { // normally there's no justification for finalized blocks assert!( - net.lock().peer(0).client().justifications(&BlockId::Number(20)).unwrap().is_none(), + net.lock() + .peer(0) + .client() + .justifications(&BlockId::Number(20)) + .unwrap() + .is_none(), "Extra justification for block#1", ); } @@ -425,7 +428,7 @@ fn finalize_3_voters_1_full_observer() { observer_enabled: true, telemetry: None, }, - link: link, + link, network: net_service, voting_rule: (), prometheus_registry: None, @@ -444,9 +447,10 @@ fn finalize_3_voters_1_full_observer() { for peer_id in 0..4 { let client = net.lock().peers[peer_id].client().clone(); finality_notifications.push( - client.finality_notification_stream() + client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &20)) - .for_each(move |_| future::ready(())) + .for_each(move |_| future::ready(())), ); } @@ -458,9 +462,8 @@ fn finalize_3_voters_1_full_observer() { // all peers should have stored the justification for the best finalized block #20 for peer_id in 0..4 { let client = net.lock().peers[peer_id].client().as_full().unwrap(); - let justification = crate::aux_schema::best_justification::<_, Block>(&*client) - .unwrap() - .unwrap(); + let justification = + crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); assert_eq!(justification.commit.target_number, 20); } @@ -469,27 +472,16 @@ fn finalize_3_voters_1_full_observer() { #[test] fn transition_3_voters_twice_1_full_observer() { sp_tracing::try_init_simple(); - let peers_a = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ]; + let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[ - Ed25519Keyring::Dave, - Ed25519Keyring::Eve, - Ed25519Keyring::Ferdie, - ]; + let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; - let peers_c = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Eve, - Ed25519Keyring::Two, - ]; + let peers_c = &[Ed25519Keyring::Alice, Ed25519Keyring::Eve, Ed25519Keyring::Two]; let observer = &[Ed25519Keyring::One]; - let all_peers = peers_a.iter() + let all_peers = peers_a + .iter() .chain(peers_b) .chain(peers_c) .chain(observer) @@ -511,11 +503,9 @@ fn transition_3_voters_twice_1_full_observer() { let (net_service, link) = { let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].network_service().clone(), - link, - ) + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].network_service().clone(), link) }; let grandpa_params = GrandpaParams { @@ -536,7 +526,8 @@ fn transition_3_voters_twice_1_full_observer() { telemetry: None, }; - voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + voters + .push(run_grandpa_voter(grandpa_params).expect("all 
in order with client and network")); } net.lock().peer(0).push_blocks(1, false); @@ -544,10 +535,10 @@ fn transition_3_voters_twice_1_full_observer() { for (i, peer) in net.lock().peers().iter().enumerate() { let full_client = peer.client().as_full().expect("only full clients are used in test"); - assert_eq!(full_client.chain_info().best_number, 1, - "Peer #{} failed to sync", i); + assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -559,7 +550,8 @@ fn transition_3_voters_twice_1_full_observer() { let peers_c = peers_c.clone(); // wait for blocks to be finalized before generating new ones - let block_production = client.finality_notification_stream() + let block_production = client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |n| { match n.header.number() { @@ -571,10 +563,10 @@ fn transition_3_voters_twice_1_full_observer() { // generate transition at block 15, applied at 20. net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 4, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 4 }, + ); block }); @@ -585,10 +577,10 @@ fn transition_3_voters_twice_1_full_observer() { // add more until we have 30. net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(&peers_c), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(&peers_c), delay: 0 }, + ); block }); @@ -612,16 +604,18 @@ fn transition_3_voters_twice_1_full_observer() { for (peer_id, _) in all_peers.into_iter().enumerate() { let client = net.lock().peers[peer_id].client().clone(); finality_notifications.push( - client.finality_notification_stream() + client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |_| future::ready(())) .map(move |()| { let full_client = client.as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); assert_eq!(set.pending_changes().count(), 0); - }) + }), ); } @@ -648,7 +642,13 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&BlockId::Number(32)).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(32)) + .unwrap() + .is_some()); } } @@ -670,10 +670,10 @@ fn sync_justifications_on_change_blocks() { // at block 21 we do add a transition which is instant net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - 
next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); block }); @@ -682,8 +682,7 @@ fn sync_justifications_on_change_blocks() { net.block_until_sync(); for i in 0..4 { - assert_eq!(net.peer(i).client().info().best_number, 25, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 25, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -693,12 +692,25 @@ fn sync_justifications_on_change_blocks() { // the first 3 peers are grandpa voters and therefore have already finalized // block 21 and stored a justification for i in 0..3 { - assert!(net.lock().peer(i).client().justifications(&BlockId::Number(21)).unwrap().is_some()); + assert!(net + .lock() + .peer(i) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_some()); } // the last peer should get the justification by syncing from other peers futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justifications(&BlockId::Number(21)).unwrap().is_none() { + if net + .lock() + .peer(3) + .client() + .justifications(&BlockId::Number(21)) + .unwrap() + .is_none() + { net.lock().poll(cx); Poll::Pending } else { @@ -717,8 +729,12 @@ fn finalizes_multiple_pending_changes_in_order() { let peers_c = &[Ed25519Keyring::Dave, Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let all_peers = &[ - Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie, - Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie, + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, ]; let genesis_voters = make_ids(peers_a); @@ -735,10 +751,10 @@ fn finalizes_multiple_pending_changes_in_order() { // at block 21 we do add a transition which is instant net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); block }); @@ -748,10 +764,10 @@ fn finalizes_multiple_pending_changes_in_order() { // at block 26 we add another which is enacted at block 30 net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_c), - delay: 4, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_c), delay: 4 }, + ); block }); @@ -762,8 +778,7 @@ fn finalizes_multiple_pending_changes_in_order() { // all peers imported both change blocks for i in 0..6 { - assert_eq!(net.peer(i).client().info().best_number, 30, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 30, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -794,16 +809,17 @@ fn force_change_to_new_set() { let mut block = builder.build().unwrap().block; // add a forced transition at block 12. - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 10, - }); + add_forced_change( + &mut block, + 0, + ScheduledChange { next_authorities: voters.clone(), delay: 10 }, + ); // add a normal transition too to ensure that forced changes take priority. 
- add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(genesis_authorities), - delay: 5, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(genesis_authorities), delay: 5 }, + ); block }); @@ -812,11 +828,11 @@ fn force_change_to_new_set() { net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().best_number, 26, - "Peer #{} failed to sync", i); + assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); let full_client = peer.client().as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); assert_eq!(set.current(), (1, voters.as_slice())); assert_eq!(set.pending_changes().count(), 0); @@ -841,12 +857,14 @@ fn allows_reimporting_change_blocks() { let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().unwrap(); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); let block = || { let block = block.clone(); @@ -886,13 +904,15 @@ fn test_bad_justification() { let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); + add_scheduled_change( + &mut block, + ScheduledChange { next_authorities: make_ids(peers_b), delay: 0 }, + ); let block = || { let block = block.clone(); @@ -923,8 +943,8 @@ fn test_bad_justification() { #[test] fn voter_persists_its_votes() { - use std::sync::atomic::{AtomicUsize, Ordering}; use futures::future; + use std::sync::atomic::{AtomicUsize, Ordering}; sp_tracing::try_init_simple(); let mut runtime = Runtime::new().unwrap(); @@ -959,8 +979,7 @@ fn voter_persists_its_votes() { let set_state = { let bob_client = net.peer(1).client().clone(); - let (_, _, link) = net - .make_block_import(bob_client); + let (_, _, link) = net.make_block_import(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. } = persistent_data; set_state @@ -983,10 +1002,7 @@ fn voter_persists_its_votes() { let (net_service, link) = { // temporary needed for some reason let link = net.peers[0].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[0].network_service().clone(), - link, - ) + (net.peers[0].network_service().clone(), link) }; let grandpa_params = GrandpaParams { @@ -1026,8 +1042,7 @@ fn voter_persists_its_votes() { // read the persisted state after aborting alice_voter1. 
let alice_client = net.peer(0).client().clone(); - let (_block_import, _, link) = net - .make_block_import(alice_client); + let (_block_import, _, link) = net.make_block_import(alice_client); let link = link.lock().take().unwrap(); let grandpa_params = GrandpaParams { @@ -1064,8 +1079,7 @@ fn voter_persists_its_votes() { net.peer(0).push_blocks(20, false); net.block_until_sync(); - assert_eq!(net.peer(0).client().info().best_number, 20, - "Peer #{} failed to sync", 0); + assert_eq!(net.peer(0).client().info().best_number, 20, "Peer #{} failed to sync", 0); let net = Arc::new(Mutex::new(net)); @@ -1113,12 +1127,13 @@ fn voter_persists_its_votes() { // we push 20 more blocks to alice's chain net.lock().peer(0).push_blocks(20, false); - let interval = futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| - Box::pin(async move { - delay.await; - Some(((), Delay::new(Duration::from_millis(200)))) - }) - ); + let interval = + futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| { + Box::pin(async move { + delay.await; + Some(((), Delay::new(Duration::from_millis(200)))) + }) + }); interval .take_while(move |_| { @@ -1135,17 +1150,19 @@ fn voter_persists_its_votes() { runtime_handle.spawn(alice_voter2(peers, net.clone())); // and we push our own prevote for block 30 - let prevote = finality_grandpa::Prevote { - target_number: 30, - target_hash: block_30_hash, - }; + let prevote = + finality_grandpa::Prevote { target_number: 30, target_hash: block_30_hash }; // One should either be calling `Sink::send` or `Sink::start_send` followed // by `Sink::poll_complete` to make sure items are being flushed. Given that // we send in a loop including a delay until items are received, this can be // ignored for the sake of reduced complexity. 
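The comment above is about `Sink` semantics: `start_send` only enqueues an item, while the `SinkExt::send` combinator also drives the flush to completion. A self-contained illustration, assuming the futures crate's unbounded channel, whose sender implements `Sink` and flushes trivially (which is why the test can get away with a bare `start_send`):

    use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};

    fn main() {
        let (mut tx, mut rx) = mpsc::unbounded::<u32>();

        block_on(async {
            // `send` = readiness check + `start_send` + `poll_flush`.
            tx.send(1).await.expect("receiver still alive");
            drop(tx); // close the channel so the stream terminates
            assert_eq!(rx.next().await, Some(1));
            assert_eq!(rx.next().await, None);
        });
    }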
- Pin::new(&mut *round_tx.lock()).start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); - } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 1 { + Pin::new(&mut *round_tx.lock()) + .start_send(finality_grandpa::Message::Prevote(prevote)) + .unwrap(); + } else if state.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 1 + { // the next message we receive should be our own prevote let prevote = match signed.message { finality_grandpa::Message::Prevote(prevote) => prevote, @@ -1155,11 +1172,12 @@ fn voter_persists_its_votes() { // targeting block 30 assert!(prevote.target_number == 30); - // after alice restarts it should send its previous prevote - // therefore we won't ever receive it again since it will be a - // known message on the gossip layer - - } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == 2 { + // after alice restarts it should send its previous prevote + // therefore we won't ever receive it again since it will be a + // known message on the gossip layer + } else if state.compare_exchange(2, 3, Ordering::SeqCst, Ordering::SeqCst).unwrap() == + 2 + { // we then receive a precommit from alice for block 15 // even though we casted a prevote for block 30 let precommit = match signed.message { @@ -1202,13 +1220,13 @@ fn finalize_3_voters_1_light_observer() { }, net.peers[3].data.lock().take().expect("link initialized at startup; qed"), net.peers[3].network_service().clone(), - ).unwrap(); + ) + .unwrap(); net.peer(0).push_blocks(20, false); net.block_until_sync(); for i in 0..4 { - assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); + assert_eq!(net.peer(i).client().info().best_number, 20, "Peer #{} failed to sync", i); } let net = Arc::new(Mutex::new(net)); @@ -1231,7 +1249,11 @@ fn voter_catches_up_to_latest_round_when_behind() { let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); - let voter = |keystore, peer_id, link, net: Arc>| -> Pin + Send>> { + let voter = |keystore, + peer_id, + link, + net: Arc>| + -> Pin + Send>> { let grandpa_params = GrandpaParams { config: Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -1259,17 +1281,16 @@ fn voter_catches_up_to_latest_round_when_behind() { for (peer_id, key) in peers.iter().enumerate() { let (client, link) = { let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) + let link = + net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) }; finality_notifications.push( - client.finality_notification_stream() + client + .finality_notification_stream() .take_while(|n| future::ready(n.header.number() < &50)) - .for_each(move |_| future::ready(())) + .for_each(move |_| future::ready(())), ); let (keystore, keystore_path) = create_keystore(*key); @@ -1324,11 +1345,10 @@ fn voter_catches_up_to_latest_round_when_behind() { }; let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending + net.lock().poll(cx); + Poll::<()>::Pending }); - runtime.block_on( - future::select(test, drive_to_completion) - ); + runtime.block_on(future::select(test, drive_to_completion)); } type TestEnvironment = Environment< @@ -1350,11 +1370,7 @@ where N: NetworkT, VR: VotingRule, { - let PersistentData { - ref authority_set, - ref set_state, - .. 
- } = link.persistent_data; + let PersistentData { ref authority_set, ref set_state, .. } = link.persistent_data; let config = Config { gossip_duration: TEST_GOSSIP_DURATION, @@ -1366,13 +1382,8 @@ where telemetry: None, }; - let network = NetworkBridge::new( - network_service.clone(), - config.clone(), - set_state.clone(), - None, - None, - ); + let network = + NetworkBridge::new(network_service.clone(), config.clone(), set_state.clone(), None, None); Environment { authority_set: authority_set.clone(), @@ -1428,25 +1439,28 @@ fn grandpa_environment_respects_voting_rules() { // the unrestricted environment should just return the best block assert_eq!( - block_on(unrestricted_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(unrestricted_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( - block_on(three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 16, ); assert_eq!( - block_on(default_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 16, ); @@ -1455,18 +1469,20 @@ fn grandpa_environment_respects_voting_rules() { // the 3/4 environment should propose block 21 for voting assert_eq!( - block_on(three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(three_quarters_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); // while the default environment will always still make sure we don't vote // on the best block (2 behind) assert_eq!( - block_on(default_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 19, ); @@ -1477,9 +1493,10 @@ fn grandpa_environment_respects_voting_rules() { // best block, there's a hard rule that we can't cast any votes lower than // the given base (#21). 
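The assertions in this area encode the two restrictions being exercised: a rule that proposes roughly three quarters of the unfinalized chain, and the default rule set, which additionally stays two blocks behind the best block and never votes below the given base. A toy sketch of that arithmetic, with hypothetical rounding (the crate's actual `VotingRule` implementations differ in details):

    /// Roughly 3/4 of the way through the unfinalized chain (ceiling division).
    fn three_quarters(finalized: u64, best: u64) -> u64 {
        let gap = best - finalized;
        finalized + (3 * gap + 3) / 4
    }

    /// Stay `n` blocks behind the best block, but never below `finalized`.
    fn before_best_by(finalized: u64, best: u64, n: u64) -> u64 {
        best.saturating_sub(n).max(finalized)
    }

    /// The default set chains both rules, so the stricter one wins.
    fn default_rules(finalized: u64, best: u64) -> u64 {
        three_quarters(finalized, best).min(before_best_by(finalized, best, 2))
    }

    fn main() {
        // Mirrors the shape of the first assertions: best = 21, finalized = 0.
        assert_eq!(three_quarters(0, 21), 16);
        assert_eq!(before_best_by(0, 21, 2), 19);
        assert_eq!(default_rules(0, 21), 16);
    }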
assert_eq!( - block_on(default_env.best_chain_containing( - peer.client().info().finalized_hash - )).unwrap().unwrap().1, + block_on(default_env.best_chain_containing(peer.client().info().finalized_hash)) + .unwrap() + .unwrap() + .1, 21, ); } @@ -1518,9 +1535,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { assert_eq!(get_current_round(2), None); // after completing round 1 we should start tracking round 2 - environment - .completed(1, round_state(), base(), &historical_votes()) - .unwrap(); + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); assert_eq!(get_current_round(2).unwrap(), HasVoted::No); @@ -1530,10 +1545,8 @@ fn grandpa_environment_never_overwrites_round_voter_state() { let info = peer.client().info(); - let prevote = finality_grandpa::Prevote { - target_hash: info.best_hash, - target_number: info.best_number, - }; + let prevote = + finality_grandpa::Prevote { target_hash: info.best_hash, target_number: info.best_number }; // we prevote for round 2 which should lead to us updating the voter state environment.prevoted(2, prevote.clone()).unwrap(); @@ -1545,9 +1558,7 @@ fn grandpa_environment_never_overwrites_round_voter_state() { // if we report round 1 as completed again we should not overwrite the // voter state for round 2 - environment - .completed(1, round_state(), base(), &historical_votes()) - .unwrap(); + environment.completed(1, round_state(), base(), &historical_votes()).unwrap(); assert_matches!(get_current_round(2).unwrap(), HasVoted::Yes(_, _)); } @@ -1566,7 +1577,9 @@ fn imports_justification_for_regular_blocks_on_import() { let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); let block = builder.build().unwrap().block; let block_hash = block.hash(); @@ -1597,11 +1610,7 @@ fn imports_justification_for_regular_blocks_on_import() { precommits: vec![precommit], }; - GrandpaJustification::from_commit( - &full_client, - round, - commit, - ).unwrap() + GrandpaJustification::from_commit(&full_client, round, commit).unwrap() }; // we import the block with justification attached @@ -1622,9 +1631,7 @@ fn imports_justification_for_regular_blocks_on_import() { ); // the justification should be imported and available from the client - assert!( - client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some(), - ); + assert!(client.justifications(&BlockId::Hash(block_hash)).unwrap().is_some(),); } #[test] @@ -1644,10 +1651,7 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { }; let signed_prevote = { - let prevote = finality_grandpa::Prevote { - target_hash: H256::random(), - target_number: 1, - }; + let prevote = finality_grandpa::Prevote { target_hash: H256::random(), target_number: 1 }; let signed = alice.sign(&[]).into(); (prevote, signed) @@ -1667,10 +1671,7 @@ fn grandpa_environment_doesnt_send_equivocation_reports_for_itself() { // reporting the equivocation should fail since the offender is a local // authority (i.e. 
we have keys in our keystore for the given id) let equivocation_proof = sp_finality_grandpa::Equivocation::Prevote(equivocation.clone()); - assert!(matches!( - environment.report_equivocation(equivocation_proof), - Err(Error::Safety(_)) - )); + assert!(matches!(environment.report_equivocation(equivocation_proof), Err(Error::Safety(_)))); // if we set the equivocation offender to another id for which we don't have // keys it should work diff --git a/substrate/client/finality-grandpa/src/until_imported.rs b/substrate/client/finality-grandpa/src/until_imported.rs index 7cfd9e6074c47ef0c8ee103f31ae7950addd3883..ccab843316d2c605feb53bb2b7b6c9d196b38429 100644 --- a/substrate/client/finality-grandpa/src/until_imported.rs +++ b/substrate/client/finality-grandpa/src/until_imported.rs @@ -23,32 +23,31 @@ //! This is used for votes and commit messages currently. use super::{ - BlockStatus as BlockStatusT, - BlockSyncRequester as BlockSyncRequesterT, - CommunicationIn, - Error, + BlockStatus as BlockStatusT, BlockSyncRequester as BlockSyncRequesterT, CommunicationIn, Error, SignedMessage, }; -use log::{debug, warn}; -use sp_utils::mpsc::TracingUnboundedReceiver; -use futures::prelude::*; -use futures::stream::{Fuse, StreamExt}; -use futures_timer::Delay; use finality_grandpa::voter; -use parking_lot::Mutex; -use prometheus_endpoint::{ - Gauge, U64, PrometheusError, register, Registry, +use futures::{ + prelude::*, + stream::{Fuse, StreamExt}, }; +use futures_timer::Delay; +use log::{debug, warn}; +use parking_lot::Mutex; +use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; use sc_client_api::{BlockImportNotification, ImportNotifications}; use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_utils::mpsc::TracingUnboundedReceiver; -use std::collections::{HashMap, VecDeque}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use std::time::Duration; +use std::{ + collections::{HashMap, VecDeque}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); @@ -84,7 +83,6 @@ pub(crate) enum DiscardWaitOrReady { } /// Prometheus metrics for the `UntilImported` queue. -// // At a given point in time there can be more than one `UntilImported` queue. One can not register a // metric twice, thus queues need to share the same Prometheus metrics instead of instantiating // their own ones. @@ -101,10 +99,13 @@ pub(crate) struct Metrics { impl Metrics { pub(crate) fn register(registry: &Registry) -> Result { Ok(Self { - global_waiting_messages: register(Gauge::new( - "finality_grandpa_until_imported_waiting_messages_number", - "Number of finality grandpa messages waiting within the until imported queue.", - )?, registry)?, + global_waiting_messages: register( + Gauge::new( + "finality_grandpa_until_imported_waiting_messages_number", + "Number of finality grandpa messages waiting within the until imported queue.", + )?, + registry, + )?, local_waiting_messages: 0, }) } @@ -120,7 +121,6 @@ impl Metrics { } } - impl Clone for Metrics { fn clone(&self) -> Self { Metrics { @@ -136,8 +136,7 @@ impl Drop for Metrics { fn drop(&mut self) { // Reduce the global counter by the amount of messages that were still left in the dropped // queue. 
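The `Metrics` hunks in until_imported.rs share one global gauge across every queue instance; a clone starts with a zero local count, and, as the comment above notes, `Drop` subtracts whatever the dying queue still held so the global number stays honest. A rough standalone equivalent, assuming the plain prometheus crate (the real code goes through `prometheus_endpoint` re-exports):

    use prometheus::{IntGauge, Registry};

    struct QueueMetrics {
        global_waiting: IntGauge, // shared across all queues via clone
        local_waiting: i64,       // this queue's contribution only
    }

    impl QueueMetrics {
        fn register(registry: &Registry) -> prometheus::Result<Self> {
            let gauge =
                IntGauge::new("waiting_messages", "Messages waiting in all queues")?;
            registry.register(Box::new(gauge.clone()))?;
            Ok(Self { global_waiting: gauge, local_waiting: 0 })
        }

        fn note_waiting(&mut self, n: i64) {
            self.global_waiting.add(n);
            self.local_waiting += n;
        }
    }

    impl Clone for QueueMetrics {
        fn clone(&self) -> Self {
            // A fresh queue shares the gauge but holds nothing yet.
            Self { global_waiting: self.global_waiting.clone(), local_waiting: 0 }
        }
    }

    impl Drop for QueueMetrics {
        fn drop(&mut self) {
            // Remove this queue's share from the global count.
            self.global_waiting.sub(self.local_waiting);
        }
    }

    fn main() {
        let registry = Registry::new();
        let mut metrics = QueueMetrics::register(&registry).unwrap();
        metrics.note_waiting(3);
        drop(metrics); // gauge returns to 0 for the remaining queues
    }

Registering the gauge once and handing clones to each queue is what avoids the duplicate-registration error the comment in this file warns about.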
- self.global_waiting_messages - .sub(self.local_waiting_messages) + self.global_waiting_messages.sub(self.local_waiting_messages) } } @@ -200,11 +199,12 @@ where // used in the event of missed import notifications const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); - let check_pending = futures::stream::unfold(Delay::new(CHECK_PENDING_INTERVAL), |delay| + let check_pending = futures::stream::unfold(Delay::new(CHECK_PENDING_INTERVAL), |delay| { Box::pin(async move { delay.await; Some((Ok(()), Delay::new(CHECK_PENDING_INTERVAL))) - })); + }) + }); UntilImported { import_notifications: import_notifications.fuse(), @@ -220,7 +220,9 @@ where } } -impl Stream for UntilImported where +impl Stream + for UntilImported +where Block: BlockT, BStatus: BlockStatusT, BSyncRequester: BlockSyncRequesterT, @@ -257,7 +259,7 @@ impl Stream for UntilImported break, } } @@ -269,12 +271,12 @@ impl Stream for UntilImported break, } } @@ -286,7 +288,9 @@ impl Stream for UntilImported Stream for UntilImported BlockUntilImported for SignedMessage { if let Some(number) = status_check.block_number(target_hash)? { if number != target_number { warn_authority_wrong_target(target_hash, msg.id); - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } else { - return Ok(DiscardWaitOrReady::Ready(msg)); + return Ok(DiscardWaitOrReady::Ready(msg)) } } @@ -386,13 +390,8 @@ impl BlockUntilImported for SignedMessage { /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. -pub(crate) type UntilVoteTargetImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - SignedMessage, ->; +pub(crate) type UntilVoteTargetImported = + UntilImported>; /// This blocks a global message import, i.e. a commit or catch up messages, /// until all blocks referenced in its votes are known. @@ -445,19 +444,18 @@ impl BlockUntilImported for BlockGlobalMessage { if let Some(number) = status_check.block_number(target_hash)? { entry.insert(KnownOrUnknown::Known(number)); number - } else { entry.insert(KnownOrUnknown::Unknown(perceived_number)); perceived_number } - } + }, }; if canon_number != perceived_number { // invalid global message: messages targeting wrong number // or at least different from other vote in same global // message. - return Ok(false); + return Ok(false) } Ok(true) @@ -466,23 +464,24 @@ impl BlockUntilImported for BlockGlobalMessage { match input { voter::CommunicationIn::Commit(_, ref commit, ..) => { // add known hashes from all precommits. - let precommit_targets = commit.precommits - .iter() - .map(|c| (c.target_number, c.target_hash)); + let precommit_targets = + commit.precommits.iter().map(|c| (c.target_number, c.target_hash)); for (target_number, target_hash) in precommit_targets { if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, voter::CommunicationIn::CatchUp(ref catch_up, ..) => { // add known hashes from all prevotes and precommits. - let prevote_targets = catch_up.prevotes + let prevote_targets = catch_up + .prevotes .iter() .map(|s| (s.prevote.target_number, s.prevote.target_hash)); - let precommit_targets = catch_up.precommits + let precommit_targets = catch_up + .precommits .iter() .map(|s| (s.precommit.target_number, s.precommit.target_hash)); @@ -490,29 +489,39 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in targets { if !query_known(target_hash, target_number)? 
{ - return Ok(DiscardWaitOrReady::Discard); + return Ok(DiscardWaitOrReady::Discard) } } }, }; } - let unknown_hashes = checked_hashes.into_iter().filter_map(|(hash, num)| match num { - KnownOrUnknown::Unknown(number) => Some((hash, number)), - KnownOrUnknown::Known(_) => None, - }).collect::>(); + let unknown_hashes = checked_hashes + .into_iter() + .filter_map(|(hash, num)| match num { + KnownOrUnknown::Unknown(number) => Some((hash, number)), + KnownOrUnknown::Known(_) => None, + }) + .collect::>(); if unknown_hashes.is_empty() { // none of the hashes in the global message were unknown. // we can just return the message directly. - return Ok(DiscardWaitOrReady::Ready(input)); + return Ok(DiscardWaitOrReady::Ready(input)) } let locked_global = Arc::new(Mutex::new(Some(input))); - let items_to_await = unknown_hashes.into_iter().map(|(hash, target_number)| { - (hash, target_number, BlockGlobalMessage { inner: locked_global.clone(), target_number }) - }).collect(); + let items_to_await = unknown_hashes + .into_iter() + .map(|(hash, target_number)| { + ( + hash, + target_number, + BlockGlobalMessage { inner: locked_global.clone(), target_number }, + ) + }) + .collect(); // schedule waits for all unknown messages. // when the last one of these has `wait_completed` called on it, @@ -525,7 +534,7 @@ impl BlockUntilImported for BlockGlobalMessage { // Delete the inner message so it won't ever be forwarded. Future calls to // `wait_completed` on the same `inner` will ignore it. *self.inner.lock() = None; - return None; + return None } match Arc::try_unwrap(self.inner) { @@ -542,25 +551,20 @@ impl BlockUntilImported for BlockGlobalMessage { /// A stream which gates off incoming global messages, i.e. commit and catch up /// messages, until all referenced block hashes have been imported. 
-pub(crate) type UntilGlobalMessageBlocksImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - BlockGlobalMessage, ->; +pub(crate) type UntilGlobalMessageBlocksImported = + UntilImported>; #[cfg(test)] mod tests { use super::*; use crate::{CatchUp, CompactCommit}; - use substrate_test_runtime_client::runtime::{Block, Hash, Header}; - use sp_consensus::BlockOrigin; - use sc_client_api::BlockImportNotification; + use finality_grandpa::Precommit; use futures::future::Either; use futures_timer::Delay; + use sc_client_api::BlockImportNotification; + use sp_consensus::BlockOrigin; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; - use finality_grandpa::Precommit; + use substrate_test_runtime_client::runtime::{Block, Hash, Header}; #[derive(Clone)] struct TestChainState { @@ -571,10 +575,8 @@ mod tests { impl TestChainState { fn new() -> (Self, ImportNotifications) { let (tx, rx) = tracing_unbounded("test"); - let state = TestChainState { - sender: tx, - known_blocks: Arc::new(Mutex::new(HashMap::new())), - }; + let state = + TestChainState { sender: tx, known_blocks: Arc::new(Mutex::new(HashMap::new())) }; (state, rx) } @@ -588,13 +590,15 @@ mod tests { let number = header.number().clone(); self.known_blocks.lock().insert(hash, number); - self.sender.unbounded_send(BlockImportNotification { - hash, - origin: BlockOrigin::File, - header, - is_new_best: false, - tree_route: None, - }).unwrap(); + self.sender + .unbounded_send(BlockImportNotification { + hash, + origin: BlockOrigin::File, + header, + is_new_best: false, + tree_route: None, + }) + .unwrap(); } } @@ -615,14 +619,17 @@ mod tests { impl Default for TestBlockSyncRequester { fn default() -> Self { - TestBlockSyncRequester { - requests: Arc::new(Mutex::new(Vec::new())), - } + TestBlockSyncRequester { requests: Arc::new(Mutex::new(Vec::new())) } } } impl BlockSyncRequesterT for TestBlockSyncRequester { - fn set_sync_fork_request(&self, _peers: Vec, hash: Hash, number: NumberFor) { + fn set_sync_fork_request( + &self, + _peers: Vec, + hash: Hash, + number: NumberFor, + ) { self.requests.lock().push((hash, number)); } } @@ -639,7 +646,7 @@ mod tests { // unwrap the commit from `CommunicationIn` returning its fields in a tuple, // panics if the given message isn't a commit - fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit::) { + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit) { match msg { voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), _ => panic!("expected commit"), @@ -658,7 +665,8 @@ mod tests { fn message_all_dependencies_satisfied( msg: CommunicationIn, enact_dependencies: F, - ) -> CommunicationIn where + ) -> CommunicationIn + where F: FnOnce(&TestChainState), { let (chain_state, import_notifications) = TestChainState::new(); @@ -688,7 +696,8 @@ mod tests { fn blocking_message_on_dependencies( msg: CommunicationIn, enact_dependencies: F, - ) -> CommunicationIn where + ) -> CommunicationIn + where F: FnOnce(&TestChainState), { let (chain_state, import_notifications) = TestChainState::new(); @@ -710,16 +719,17 @@ mod tests { // NOTE: needs to be cloned otherwise it is moved to the stream and // dropped too early. let inner_chain_state = chain_state.clone(); - let work = future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) - .then(move |res| match res { - Either::Left(_) => panic!("timeout should have fired first"), - Either::Right((_, until_imported)) => { - // timeout fired. push in the headers. 
- enact_dependencies(&inner_chain_state); - - until_imported - } - }); + let work = + future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) + .then(move |res| match res { + Either::Left(_) => panic!("timeout should have fired first"), + Either::Right((_, until_imported)) => { + // timeout fired. push in the headers. + enact_dependencies(&inner_chain_state); + + until_imported + }, + }); futures::executor::block_on(work).0.unwrap().unwrap() } @@ -734,37 +744,22 @@ mod tests { target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); - let res = blocking_message_on_dependencies( - unknown_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = blocking_message_on_dependencies(unknown_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_commit(res), - unapply_commit(unknown_commit()), - ); + assert_eq!(unapply_commit(res), unapply_commit(unknown_commit()),); } #[test] @@ -777,37 +772,22 @@ mod tests { target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let known_commit = || voter::CommunicationIn::Commit( - 0, - known_commit.clone(), - voter::Callback::Blank, - ); + let known_commit = + || voter::CommunicationIn::Commit(0, known_commit.clone(), voter::Callback::Blank); - let res = message_all_dependencies_satisfied( - known_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = message_all_dependencies_satisfied(known_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_commit(res), - unapply_commit(known_commit()), - ); + assert_eq!(unapply_commit(res), unapply_commit(known_commit()),); } #[test] @@ -816,37 +796,27 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), 
- target_number: *header.number(), - }, - } + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; let unknown_catch_up = finality_grandpa::CatchUp { round_number: 1, @@ -856,24 +826,16 @@ mod tests { base_number: *h1.number(), }; - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); - let res = blocking_message_on_dependencies( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = blocking_message_on_dependencies(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up()),); } #[test] @@ -882,37 +844,27 @@ mod tests { let h2 = make_header(6); let h3 = make_header(7); - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), - target_number: *header.number(), - }, - } + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, }; - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; let unknown_catch_up = finality_grandpa::CatchUp { round_number: 1, @@ -922,24 +874,16 @@ mod tests { base_number: *h1.number(), }; - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); - let res = message_all_dependencies_satisfied( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); + let res = message_all_dependencies_satisfied(unknown_catch_up(), |chain_state| { + 
chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up()),); } #[test] @@ -970,23 +914,14 @@ target_hash: h1.hash(), target_number: 5, precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, + Precommit { target_hash: h2.hash(), target_number: 6 }, + Precommit { target_hash: h3.hash(), target_number: 7 }, ], auth_data: Vec::new(), // not used }; - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); // we send the commit message and spawn the until_imported stream global_tx.unbounded_send(unknown_commit()).unwrap(); @@ -1002,7 +937,7 @@ if block_sync_requests.contains(&(h2.hash(), *h2.number())) && block_sync_requests.contains(&(h3.hash(), *h3.number())) { - return Poll::Ready(()); + return Poll::Ready(()) } // NOTE: nothing in this function is future-aware (i.e nothing gets registered to wake @@ -1016,10 +951,12 @@ // the `until_imported` stream doesn't request the blocks immediately, // but it should request them after a small timeout let timeout = Delay::new(Duration::from_secs(60)); - let test = future::select(assert, timeout).map(|res| match res { - Either::Left(_) => {}, - Either::Right(_) => panic!("timed out waiting for block sync request"), - }).map(drop); + let test = future::select(assert, timeout) + .map(|res| match res { + Either::Left(_) => {}, + Either::Right(_) => panic!("timed out waiting for block sync request"), + }) + .map(drop); futures::executor::block_on(test); } @@ -1035,10 +972,8 @@ base_number: *header.number(), }; - let catch_up = voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); + let catch_up = + voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); Arc::new(Mutex::new(Some(catch_up))) } @@ -1047,15 +982,10 @@ fn block_global_message_wait_completed_return_when_all_awaited() { let msg_inner = test_catch_up(); - let waiting_block_1 = BlockGlobalMessage::<Block> { - inner: msg_inner.clone(), - target_number: 1, - }; + let waiting_block_1 = + BlockGlobalMessage::<Block> { inner: msg_inner.clone(), target_number: 1 }; - let waiting_block_2 = BlockGlobalMessage::<Block> { - inner: msg_inner, - target_number: 2, - }; + let waiting_block_2 = BlockGlobalMessage::<Block> { inner: msg_inner, target_number: 2 }; // waiting_block_2 is still waiting for block 2, thus this should return `None`. assert!(waiting_block_1.wait_completed(1).is_none()); @@ -1069,15 +999,10 @@ fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { let msg_inner = test_catch_up(); - let waiting_block_1 = BlockGlobalMessage::<Block> { - inner: msg_inner.clone(), - target_number: 1, - }; + let waiting_block_1 = + BlockGlobalMessage::<Block> { inner: msg_inner.clone(), target_number: 1 }; - let waiting_block_2 = BlockGlobalMessage::<Block> { - inner: msg_inner, - target_number: 2, - }; + let waiting_block_2 = BlockGlobalMessage::<Block> { inner: msg_inner, target_number: 2 }; // Calling wait_completed with wrong block number should yield None.
assert!(waiting_block_1.wait_completed(1234).is_none()); diff --git a/substrate/client/finality-grandpa/src/voting_rule.rs b/substrate/client/finality-grandpa/src/voting_rule.rs index a5515c1be23ed5b19c69acfaef887ec18ac0a05b..b974afe0d352ea99a007333610db162f435af7f1 100644 --- a/substrate/client/finality-grandpa/src/voting_rule.rs +++ b/substrate/client/finality-grandpa/src/voting_rule.rs @@ -22,15 +22,15 @@ //! restrictions that are taken into account by the GRANDPA environment when //! selecting a finality target to vote on. -use std::future::Future; -use std::sync::Arc; -use std::pin::Pin; +use std::{future::Future, pin::Pin, sync::Arc}; use dyn_clone::DynClone; use sc_client_api::blockchain::HeaderBackend; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor, One, Zero}, +}; /// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. pub type VotingRuleResult<Block> = @@ -63,7 +63,8 @@ where ) -> VotingRuleResult<Block>; } -impl<Block, B> VotingRule<Block, B> for () where +impl<Block, B> VotingRule<Block, B> for () +where Block: BlockT, B: HeaderBackend<Block>, { @@ -83,7 +84,8 @@ impl<Block, B> VotingRule<Block, B> for () where /// behind the best block. #[derive(Clone)] pub struct BeforeBestBlockBy<N>(N); -impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> where +impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> +where Block: BlockT, B: HeaderBackend<Block>, { @@ -97,7 +99,7 @@ impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> wher use sp_arithmetic::traits::Saturating; if current_target.number().is_zero() { - return Box::pin(async { None }); + return Box::pin(async { None }) } // find the target number restricted by this rule @@ -105,17 +107,13 @@ impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> wher // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }); + return Box::pin(async { None }) } let current_target = current_target.clone(); // find the block at the given target height - Box::pin(std::future::ready(find_target( - &*backend, - target_number.clone(), - &current_target, - ))) + Box::pin(std::future::ready(find_target(&*backend, target_number.clone(), &current_target))) } } @@ -125,7 +123,8 @@ impl<Block, B> VotingRule<Block, B> for BeforeBestBlockBy<NumberFor<Block>> wher #[derive(Clone)] pub struct ThreeQuartersOfTheUnfinalizedChain; -impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain where +impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain +where Block: BlockT, B: HeaderBackend<Block>, { @@ -150,15 +149,11 @@ impl<Block, B> VotingRule<Block, B> for ThreeQuartersOfTheUnfinalizedChain where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return Box::pin(async { None }); + return Box::pin(async { None }) } // find the block at the given target height - Box::pin(std::future::ready(find_target( - &*backend, - target_number, - current_target, - ))) + Box::pin(std::future::ready(find_target(&*backend, target_number, current_target))) } } @@ -167,7 +162,8 @@ fn find_target<Block, B>( backend: &B, target_number: NumberFor<Block>, current_header: &Block::Header, -) -> Option<(Block::Hash, NumberFor<Block>)> where +) -> Option<(Block::Hash, NumberFor<Block>)> +where Block: BlockT, B: HeaderBackend<Block>, { @@ -184,11 +180,13 @@ fn find_target<Block, B>( } if *target_header.number() == target_number { - return Some((target_hash, target_number)); + return Some((target_hash, target_number)) } target_hash = *target_header.parent_hash(); - target_header =
backend.header(BlockId::Hash(target_hash)).ok()? + target_header = backend + .header(BlockId::Hash(target_hash)) + .ok()? .expect("Header known to exist due to the existence of one of its descendents; qed"); } } @@ -199,13 +197,12 @@ struct VotingRules<Block, B> { impl<Block, B> Clone for VotingRules<Block, B> { fn clone(&self) -> Self { - VotingRules { - rules: self.rules.clone(), - } + VotingRules { rules: self.rules.clone() } } } -impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B> where +impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B> +where Block: BlockT, B: HeaderBackend<Block> + 'static, { @@ -230,8 +227,8 @@ impl<Block, B> VotingRule<Block, B> for VotingRules<Block, B> where .await .filter(|(_, restricted_number)| { // NOTE: we can only restrict votes within the interval [base, target) - restricted_number >= base.number() - && restricted_number < restricted_target.number() + restricted_number >= base.number() && + restricted_number < restricted_target.number() }) .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) .and_then(std::convert::identity) @@ -257,7 +254,8 @@ pub struct VotingRulesBuilder<Block, B> { rules: Vec<Box<dyn VotingRule<Block, B>>>, } -impl<Block, B> Default for VotingRulesBuilder<Block, B> where +impl<Block, B> Default for VotingRulesBuilder<Block, B> +where Block: BlockT, B: HeaderBackend<Block> + 'static, { @@ -268,19 +266,19 @@ impl<Block, B> Default for VotingRulesBuilder<Block, B> where } } -impl<Block, B> VotingRulesBuilder<Block, B> where +impl<Block, B> VotingRulesBuilder<Block, B> +where Block: BlockT, B: HeaderBackend<Block> + 'static, { /// Return a new voting rule builder using the given backend. pub fn new() -> Self { - VotingRulesBuilder { - rules: Vec::new(), - } + VotingRulesBuilder { rules: Vec::new() } } /// Add a new voting rule to the builder. - pub fn add<R>(mut self, rule: R) -> Self where + pub fn add<R>(mut self, rule: R) -> Self + where R: VotingRule<Block, B> + 'static, { self.rules.push(Box::new(rule)); @@ -288,8 +286,9 @@ impl<Block, B> VotingRulesBuilder<Block, B> where } /// Add all given voting rules to the builder. - pub fn add_all<I>(mut self, rules: I) -> Self where - I: IntoIterator<Item = Box<dyn VotingRule<Block, B>>>, + pub fn add_all<I>(mut self, rules: I) -> Self + where + I: IntoIterator<Item = Box<dyn VotingRule<Block, B>>>, { self.rules.extend(rules); self @@ -298,13 +297,12 @@ impl<Block, B> VotingRulesBuilder<Block, B> where /// Return a new `VotingRule` that applies all of the previously added /// voting rules in-order.
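/// An illustrative composition (editor's sketch; it assumes only the builder
/// API shown above, and the concrete rule values are examples):
///
/// ```ignore
/// // Rules are applied in insertion order, each one seeing the vote already
/// // restricted by the previous rule, and never restricted past the base.
/// let rule = VotingRulesBuilder::new()
///     .add(BeforeBestBlockBy(2u32.into()))
///     .add(ThreeQuartersOfTheUnfinalizedChain)
///     .build();
/// ```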
pub fn build(self) -> impl VotingRule<Block, B> + Clone { - VotingRules { - rules: Arc::new(self.rules), - } + VotingRules { rules: Arc::new(self.rules) } } } -impl<Block, B> VotingRule<Block, B> for Box<dyn VotingRule<Block, B>> where +impl<Block, B> VotingRule<Block, B> for Box<dyn VotingRule<Block, B>> +where Block: BlockT, B: HeaderBackend<Block>, Self: Clone, @@ -358,33 +356,19 @@ fn multiple_voting_rules_cannot_restrict_past_base() { // setup an aggregate voting rule composed of two voting rules // where each subtracts 50 blocks from the current target - let rule = VotingRulesBuilder::new() - .add(Subtract(50)) - .add(Subtract(50)) - .build(); + let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build(); let mut client = Arc::new(TestClientBuilder::new().build()); for _ in 0..200 { - let block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } - let genesis = client - .header(&BlockId::Number(0u32.into())) - .unwrap() - .unwrap(); + let genesis = client.header(&BlockId::Number(0u32.into())).unwrap().unwrap(); - let best = client - .header(&BlockId::Hash(client.info().best_hash)) - .unwrap() - .unwrap(); + let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); let (_, number) = futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best)) @@ -394,10 +378,7 @@ // which means that we should be voting for block #100 assert_eq!(number, 100); - let block110 = client - .header(&BlockId::Number(110u32.into())) - .unwrap() - .unwrap(); + let block110 = client.header(&BlockId::Number(110u32.into())).unwrap().unwrap(); let (_, number) = futures::executor::block_on(rule.restrict_vote( client.clone(), diff --git a/substrate/client/informant/src/display.rs b/substrate/client/informant/src/display.rs index 0b7f8bcfaf16ba17190965a4a38e316d5873941c..4e91c22f9efd7894539b5da0f1a86d7295c3076e 100644 --- a/substrate/client/informant/src/display.rs +++ b/substrate/client/informant/src/display.rs @@ -40,7 +40,6 @@ use wasm_timer::Instant; /// /// Call `InformantDisplay::new` to initialize the state, then regularly call `display` with the /// information to display. -/// pub struct InformantDisplay<B: BlockT> { /// Head of chain block number from the last time `display` has been called. /// `None` if `display` has never been called.
@@ -84,34 +83,32 @@ impl<B: BlockT> InformantDisplay<B> { let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; - let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = - if elapsed > 0 { - self.last_total_bytes_inbound = total_bytes_inbound; - self.last_total_bytes_outbound = total_bytes_outbound; - (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) - } else { - (diff_bytes_inbound, diff_bytes_outbound) - }; - - let (level, status, target) = match ( - net_status.sync_state, - net_status.best_seen_block, - net_status.state_sync - ) { - (_, _, Some(state)) => ( - "⚙️ ", - "Downloading state".into(), - format!(", {}%, ({:.2}) Mib", state.percentage, (state.size as f32) / (1024f32 * 1024f32)), - ), - (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading, None, _) => ("⚙️ ", format!("Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n), None) => ( - "⚙️ ", - format!("Syncing{}", speed), - format!(", target=#{}", n), - ), + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) }; + let (level, status, target) = + match (net_status.sync_state, net_status.best_seen_block, net_status.state_sync) { + (_, _, Some(state)) => ( + "⚙️ ", + "Downloading state".into(), + format!( + ", {}%, ({:.2}) Mib", + state.percentage, + (state.size as f32) / (1024f32 * 1024f32) + ), + ), + (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading, None, _) => + ("⚙️ ", format!("Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n), None) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), + }; + if self.format.enable_color { info!( target: "substrate", @@ -151,7 +148,7 @@ impl<B: BlockT> InformantDisplay<B> { fn speed<B: BlockT>( best_number: NumberFor<B>, last_number: Option<NumberFor<B>>, - last_update: Instant + last_update: Instant, ) -> String { // Number of milliseconds elapsed since last time. let elapsed_ms = { @@ -164,25 +161,28 @@ fn speed<B: BlockT>( // Number of blocks that have been imported since last time. let diff = match last_number { None => return String::new(), - Some(n) => best_number.saturating_sub(n) + Some(n) => best_number.saturating_sub(n), }; if let Ok(diff) = TryInto::<u128>::try_into(diff) { // If the number of blocks can be converted to a regular integer, then it's easy: just // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; + let speed = diff + .saturating_mul(10_000) + .checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / + 10.0; format!(" {:4.1} bps", speed) - } else { // If the number of blocks can't be converted to a regular integer, then we need a more // algebraic approach and we stay within the realm of integers.
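// Editor's note (worked example, not part of this diff): in the integer
// fallback below, importing diff = 50 blocks over elapsed_ms = 10_000 gives
// 50 * 1_000 / 10_000 = 5, rendered as " 5 bps" -- the same answer the f64
// branch above would produce, just computed without leaving integers.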
let one_thousand = NumberFor::<B>::from(1_000u32); - let elapsed = NumberFor::<B>::from( - <u32 as TryFrom<_>>::try_from(elapsed_ms).unwrap_or(u32::MAX) - ); + let elapsed = + NumberFor::<B>::from(<u32 as TryFrom<_>>::try_from(elapsed_ms).unwrap_or(u32::MAX)); - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + let speed = diff + .saturating_mul(one_thousand) + .checked_div(&elapsed) .unwrap_or_else(Zero::zero); format!(" {} bps", speed) } } diff --git a/substrate/client/informant/src/lib.rs b/substrate/client/informant/src/lib.rs index a05ab368e3ed70ed5cba0110e29e4d0ead2564ae..6a91f583cd3df59fc8f69adc3b7962cd0f3c0606 100644 --- a/substrate/client/informant/src/lib.rs +++ b/substrate/client/informant/src/lib.rs @@ -25,10 +25,10 @@ use log::{info, trace, warn}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; use sc_network::NetworkService; +use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; -use sc_transaction_pool_api::TransactionPool; -use std::{fmt::Display, sync::Arc, time::Duration, collections::VecDeque}; +use std::{collections::VecDeque, fmt::Display, sync::Arc, time::Duration}; mod display; @@ -48,9 +48,7 @@ impl Default for OutputFormat { fn default() -> Self { - Self { - enable_color: true, - } + Self { enable_color: true } } } @@ -74,8 +72,7 @@ pub async fn build<B: BlockT, C>( network: Arc<NetworkService<B, <B as BlockT>::Hash>>, pool: Arc<impl TransactionPool<Block = B>>, format: OutputFormat, -) -where +) where C: UsageProvider<B> + HeaderMetadata<B> + BlockchainEvents<B>, <C as HeaderMetadata<B>>::Error: Display, { @@ -131,19 +128,19 @@ where client.import_notification_stream().for_each(move |n| { // detect and log reorganizations. if let Some((ref last_num, ref last_hash)) = last_best { - if n.header.parent_hash() != last_hash && n.is_new_best { - let maybe_ancestor = sp_blockchain::lowest_common_ancestor( - &*client, - last_hash.clone(), - n.hash, - ); + if n.header.parent_hash() != last_hash && n.is_new_best { + let maybe_ancestor = + sp_blockchain::lowest_common_ancestor(&*client, last_hash.clone(), n.hash); match maybe_ancestor { Ok(ref ancestor) if ancestor.hash != *last_hash => info!( "♻️ Reorg on #{},{} to #{},{}, common ancestor #{},{}", - Colour::Red.bold().paint(format!("{}", last_num)), last_hash, - Colour::Green.bold().paint(format!("{}", n.header.number())), n.hash, - Colour::White.bold().paint(format!("{}", ancestor.number)), ancestor.hash, + Colour::Red.bold().paint(format!("{}", last_num)), + last_hash, + Colour::Green.bold().paint(format!("{}", n.header.number())), + n.hash, + Colour::White.bold().paint(format!("{}", ancestor.number)), + ancestor.hash, ), Ok(_) => {}, Err(e) => warn!("Error computing tree route: {}", e), @@ -155,7 +152,6 @@ where last_best = Some((n.header.number().clone(), n.hash.clone())); } - // If we already printed a message for a given block recently, // we should not print it again. if !last_blocks.contains(&n.hash) { diff --git a/substrate/client/keystore/src/lib.rs b/substrate/client/keystore/src/lib.rs index 38ab640d2e3034a176f5fc7aa4e6a48835477d32..5e29f691997e69d824c62892687d18fd7317338b 100644 --- a/substrate/client/keystore/src/lib.rs +++ b/substrate/client/keystore/src/lib.rs @@ -19,9 +19,9 @@ //! Keystore (and session key management) for ed25519 based chains like Polkadot. #![warn(missing_docs)] -use std::io; use sp_core::crypto::KeyTypeId; use sp_keystore::Error as TraitError; +use std::io; /// Local keystore implementation mod local; @@ -35,19 +35,19 @@ pub enum Error { /// JSON error.
Json(serde_json::Error), /// Invalid password. - #[display(fmt="Invalid password")] + #[display(fmt = "Invalid password")] InvalidPassword, /// Invalid BIP39 phrase - #[display(fmt="Invalid recovery phrase (BIP39) data")] + #[display(fmt = "Invalid recovery phrase (BIP39) data")] InvalidPhrase, /// Invalid seed - #[display(fmt="Invalid seed")] + #[display(fmt = "Invalid seed")] InvalidSeed, /// Public key type is not supported - #[display(fmt="Key crypto type is not supported")] + #[display(fmt = "Key crypto type is not supported")] KeyNotSupported(KeyTypeId), /// Keystore unavailable - #[display(fmt="Keystore unavailable")] + #[display(fmt = "Keystore unavailable")] Unavailable, } @@ -58,9 +58,8 @@ impl From<Error> for TraitError { fn from(error: Error) -> Self { match error { Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => { - TraitError::ValidationError(error.to_string()) - }, + Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => + TraitError::ValidationError(error.to_string()), Error::Unavailable => TraitError::Unavailable, Error::Io(e) => TraitError::Other(e.to_string()), Error::Json(e) => TraitError::Other(e.to_string()), @@ -77,4 +76,3 @@ impl std::error::Error for Error { } } } - diff --git a/substrate/client/keystore/src/local.rs b/substrate/client/keystore/src/local.rs index 2377ea127756efb62030560e464d2912714b8c10..53f4785fb691bc4adc114c0f6b8838fac1471c89 100644 --- a/substrate/client/keystore/src/local.rs +++ b/substrate/client/keystore/src/local.rs @@ -17,30 +17,27 @@ // //! Local keystore implementation -use std::{ - collections::{HashMap, HashSet}, - fs::{self, File}, - io::Write, - path::PathBuf, - sync::Arc, -}; use async_trait::async_trait; use parking_lot::RwLock; +use sp_application_crypto::{ecdsa, ed25519, sr25519, AppKey, AppPair, IsWrappedBy}; use sp_core::{ - crypto::{CryptoTypePublicPair, KeyTypeId, Pair as PairT, ExposeSecret, SecretString, Public}, - sr25519::{Public as Sr25519Public, Pair as Sr25519Pair}, + crypto::{CryptoTypePublicPair, ExposeSecret, KeyTypeId, Pair as PairT, Public, SecretString}, + sr25519::{Pair as Sr25519Pair, Public as Sr25519Public}, Encode, }; use sp_keystore::{ - CryptoStore, - SyncCryptoStorePtr, - Error as TraitError, - SyncCryptoStore, - vrf::{VRFTranscriptData, VRFSignature, make_transcript}, + vrf::{make_transcript, VRFSignature, VRFTranscriptData}, + CryptoStore, Error as TraitError, SyncCryptoStore, SyncCryptoStorePtr, +}; +use std::{ + collections::{HashMap, HashSet}, + fs::{self, File}, + io::Write, + path::PathBuf, + sync::Arc, }; -use sp_application_crypto::{ed25519, sr25519, ecdsa, AppPair, AppKey, IsWrappedBy}; -use crate::{Result, Error}; +use crate::{Error, Result}; /// A local based keystore that is either memory-based or filesystem-based. pub struct LocalKeystore(RwLock<KeystoreInner>); @@ -62,14 +59,20 @@ impl LocalKeystore { /// /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists and /// `Err(_)` when something failed.
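/// An illustrative call (editor's sketch; the `keystore` value, the key type
/// and the error handling are assumptions, only the signature below is taken
/// from this change):
///
/// ```ignore
/// // Look up the sr25519 application pair stored behind a known public key.
/// let pair: Option<sr25519::AppPair> =
///     keystore.key_pair::<sr25519::AppPair>(&public)?;
/// ```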
- pub fn key_pair<Pair: AppPair>(&self, public: &<Pair as AppKey>::Public) -> Result<Option<Pair>> { + pub fn key_pair<Pair: AppPair>( + &self, + public: &<Pair as AppKey>::Public, + ) -> Result<Option<Pair>> { self.0.read().key_pair::<Pair>(public) } } #[async_trait] impl CryptoStore for LocalKeystore { - async fn keys(&self, id: KeyTypeId) -> std::result::Result<Vec<CryptoTypePublicPair>, TraitError> { + async fn keys( + &self, + id: KeyTypeId, + ) -> std::result::Result<Vec<CryptoTypePublicPair>, TraitError> { SyncCryptoStore::keys(self, id) } @@ -109,7 +112,12 @@ impl CryptoStore for LocalKeystore { SyncCryptoStore::ecdsa_generate_new(self, id, seed) } - async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> std::result::Result<(), ()> { + async fn insert_unknown( + &self, + id: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { SyncCryptoStore::insert_unknown(self, id, suri, public) } @@ -154,28 +162,22 @@ impl CryptoStore for LocalKeystore { } impl SyncCryptoStore for LocalKeystore { - fn keys( - &self, - id: KeyTypeId - ) -> std::result::Result<Vec<CryptoTypePublicPair>, TraitError> { + fn keys(&self, id: KeyTypeId) -> std::result::Result<Vec<CryptoTypePublicPair>, TraitError> { let raw_keys = self.0.read().raw_public_keys(id)?; - Ok(raw_keys.into_iter() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k)); - v - })) + Ok(raw_keys.into_iter().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k)); + v + })) } fn supported_keys( &self, id: KeyTypeId, - keys: Vec<CryptoTypePublicPair> + keys: Vec<CryptoTypePublicPair>, ) -> std::result::Result<Vec<CryptoTypePublicPair>, TraitError> { - let all_keys = SyncCryptoStore::keys(self, id)? - .into_iter() - .collect::<HashSet<_>>(); + let all_keys = SyncCryptoStore::keys(self, id)?.into_iter().collect::<HashSet<_>>(); Ok(keys.into_iter().filter(|key| all_keys.contains(key)).collect::<Vec<_>>()) } @@ -188,36 +190,40 @@ impl SyncCryptoStore for LocalKeystore { match key.0 { ed25519::CRYPTO_ID => { let pub_key = ed25519::Public::from_slice(key.1.as_slice()); - let key_pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::<ed25519::Pair>(&pub_key, id) .map_err(|e| TraitError::from(e))?; key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - } + }, sr25519::CRYPTO_ID => { let pub_key = sr25519::Public::from_slice(key.1.as_slice()); - let key_pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::<sr25519::Pair>(&pub_key, id) .map_err(|e| TraitError::from(e))?; key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() }, ecdsa::CRYPTO_ID => { let pub_key = ecdsa::Public::from_slice(key.1.as_slice()); - let key_pair = self.0.read() + let key_pair = self + .0 + .read() .key_pair_by_type::<ecdsa::Pair>(&pub_key, id) .map_err(|e| TraitError::from(e))?; key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - } - _ => Err(TraitError::KeyNotSupported(id)) + }, + _ => Err(TraitError::KeyNotSupported(id)), } } fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec<sr25519::Public> { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| sr25519::Public::from_slice(k.as_slice())) - .collect() - }) + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| v.into_iter().map(|k| sr25519::Public::from_slice(k.as_slice())).collect()) .unwrap_or_default() } @@ -227,20 +233,20 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result<sr25519::Public, TraitError> { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::<sr25519::Pair>(seed, id), +
Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::<sr25519::Pair>(seed, id), None => self.0.write().generate_by_type::<sr25519::Pair>(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec<ed25519::Public> { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| ed25519::Public::from_slice(k.as_slice())) - .collect() - }) + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| v.into_iter().map(|k| ed25519::Public::from_slice(k.as_slice())).collect()) .unwrap_or_default() } @@ -250,20 +256,20 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result<ed25519::Public, TraitError> { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::<ed25519::Pair>(seed, id), + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::<ed25519::Pair>(seed, id), None => self.0.write().generate_by_type::<ed25519::Pair>(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } fn ecdsa_public_keys(&self, key_type: KeyTypeId) -> Vec<ecdsa::Public> { - self.0.read().raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| ecdsa::Public::from_slice(k.as_slice())) - .collect() - }) + self.0 + .read() + .raw_public_keys(key_type) + .map(|v| v.into_iter().map(|k| ecdsa::Public::from_slice(k.as_slice())).collect()) .unwrap_or_default() } @@ -273,21 +279,27 @@ impl SyncCryptoStore for LocalKeystore { seed: Option<&str>, ) -> std::result::Result<ecdsa::Public, TraitError> { let pair = match seed { - Some(seed) => self.0.write().insert_ephemeral_from_seed_by_type::<ecdsa::Pair>(seed, id), + Some(seed) => + self.0.write().insert_ephemeral_from_seed_by_type::<ecdsa::Pair>(seed, id), None => self.0.write().generate_by_type::<ecdsa::Pair>(id), - }.map_err(|e| -> TraitError { e.into() })?; + } + .map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } - fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) - -> std::result::Result<(), ()> - { + fn insert_unknown( + &self, + key_type: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { self.0.write().insert_unknown(key_type, suri, public).map_err(|_| ()) } fn has_keys(&self, public_keys: &[(Vec<u8>, KeyTypeId)]) -> bool { - public_keys.iter() + public_keys + .iter() .all(|(p, t)| self.0.read().key_phrase_by_type(&p, *t).ok().flatten().is_some()) } @@ -302,10 +314,7 @@ impl SyncCryptoStore for LocalKeystore { if let Some(pair) = pair { let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(Some(VRFSignature { - output: inout.to_output(), - proof, - })) + Ok(Some(VRFSignature { output: inout.to_output(), proof })) } else { Ok(None) } @@ -317,9 +326,8 @@ impl SyncCryptoStore for LocalKeystore { public: &ecdsa::Public, msg: &[u8; 32], ) -> std::result::Result<Option<ecdsa::Signature>, TraitError> { - let pair = self.0.read() - .key_pair_by_type::<ecdsa::Pair>(public, id)?; - + let pair = self.0.read().key_pair_by_type::<ecdsa::Pair>(public, id)?; + pair.map(|k| k.sign_prehashed(msg)).map(Ok).transpose() } } @@ -362,26 +370,16 @@ impl KeystoreInner { /// Get the password for this store. fn password(&self) -> Option<&str> { - self.password.as_ref() - .map(|p| p.expose_secret()) - .map(|p| p.as_str()) + self.password.as_ref().map(|p| p.expose_secret()).map(|p| p.as_str()) } /// Create a new in-memory store. fn new_in_memory() -> Self { - Self { - path: None, - additional: HashMap::new(), - password: None - } + Self { path: None, additional: HashMap::new(), password: None } } /// Get the key phrase for the given public key and key type from the in-memory store.
- fn get_additional_pair( - &self, - public: &[u8], - key_type: KeyTypeId, - ) -> Option<&String> { + fn get_additional_pair(&self, public: &[u8], key_type: KeyTypeId) -> Option<&String> { let key = (key_type, public.to_vec()); self.additional.get(&key) } @@ -444,7 +442,7 @@ impl KeystoreInner { let path = if let Some(path) = self.key_file_path(public, key_type) { path } else { - return Ok(None); + return Ok(None) }; if path.exists() { @@ -468,10 +466,7 @@ impl KeystoreInner { return Ok(None) }; - let pair = Pair::from_string( - &phrase, - self.password(), - ).map_err(|_| Error::InvalidPhrase)?; + let pair = Pair::from_string(&phrase, self.password()).map_err(|_| Error::InvalidPhrase)?; if &pair.public() == public { Ok(Some(pair)) @@ -493,7 +488,9 @@ impl KeystoreInner { /// Returns a list of raw public keys filtered by `KeyTypeId` fn raw_public_keys(&self, id: KeyTypeId) -> Result<Vec<Vec<u8>>> { - let mut public_keys: Vec<Vec<u8>> = self.additional.keys() + let mut public_keys: Vec<Vec<u8>> = self + .additional + .keys() .into_iter() .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) .collect(); @@ -508,11 +505,11 @@ impl KeystoreInner { match hex::decode(name) { Ok(ref hex) if hex.len() > 4 => { if &hex[0..4] != &id.0 { - continue; + continue } let public = hex[4..].to_vec(); public_keys.push(public); - } + }, _ => continue, } } @@ -526,42 +523,34 @@ impl KeystoreInner { /// /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists or `Err(_)` when /// something failed. - pub fn key_pair<Pair: AppPair>(&self, public: &<Pair as AppKey>::Public) -> Result<Option<Pair>> { + pub fn key_pair<Pair: AppPair>( + &self, + public: &<Pair as AppKey>::Public, + ) -> Result<Option<Pair>> { self.key_pair_by_type::<Pair::Generic>(IsWrappedBy::from_ref(public), Pair::ID) .map(|v| v.map(Into::into)) } } - #[cfg(test)] mod tests { use super::*; - use tempfile::TempDir; - use sp_core::{ - Pair, - crypto::Ss58Codec, - testing::SR25519, - }; use sp_application_crypto::{ed25519, sr25519, AppPublic}; - use std::{ - fs, - str::FromStr, - }; + use sp_core::{crypto::Ss58Codec, testing::SR25519, Pair}; + use std::{fs, str::FromStr}; + use tempfile::TempDir; const TEST_KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); impl KeystoreInner { fn insert_ephemeral_from_seed<Pair: AppPair>(&mut self, seed: &str) -> Result<Pair> { - self.insert_ephemeral_from_seed_by_type::<Pair::Generic>(seed, Pair::ID).map(Into::into) + self.insert_ephemeral_from_seed_by_type::<Pair::Generic>(seed, Pair::ID) + .map(Into::into) } fn public_keys<Public: AppPublic>(&self) -> Result<Vec<Public>> { self.raw_public_keys(Public::ID) - .map(|v| { - v.into_iter() - .map(|k| Public::from_slice(k.as_slice())) - .collect() - }) + .map(|v| v.into_iter().map(|k| Public::from_slice(k.as_slice())).collect()) } fn generate<Pair: AppPair>(&mut self) -> Result<Pair> { @@ -592,23 +581,23 @@ mod tests { let key: ed25519::AppPair = store.0.write().generate().unwrap(); let key2 = ed25519::Pair::generate().0; - assert!( - !SyncCryptoStore::has_keys(&store, &[(key2.public().to_vec(), ed25519::AppPublic::ID)]) - ); + assert!(!SyncCryptoStore::has_keys( + &store, + &[(key2.public().to_vec(), ed25519::AppPublic::ID)] + )); - assert!( - !SyncCryptoStore::has_keys( - &store, - &[ - (key2.public().to_vec(), ed25519::AppPublic::ID), - (key.public().to_raw_vec(), ed25519::AppPublic::ID), - ], - ) - ); + assert!(!SyncCryptoStore::has_keys( + &store, + &[ + (key2.public().to_vec(), ed25519::AppPublic::ID), + (key.public().to_raw_vec(), ed25519::AppPublic::ID), + ], + )); - assert!( - SyncCryptoStore::has_keys(&store, &[(key.public().to_raw_vec(),
ed25519::AppPublic::ID)] + )); } #[test] @@ -616,9 +605,11 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let mut store = KeystoreInner::open(temp_dir.path(), None).unwrap(); - let pair: ed25519::AppPair = store.insert_ephemeral_from_seed( - "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc" - ).unwrap(); + let pair: ed25519::AppPair = store + .insert_ephemeral_from_seed( + "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc", + ) + .unwrap(); assert_eq!( "5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HJCA", pair.public().to_ss58check() @@ -637,7 +628,8 @@ mod tests { let mut store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), - ).unwrap(); + ) + .unwrap(); let pair: ed25519::AppPair = store.generate().unwrap(); assert_eq!( @@ -652,7 +644,8 @@ mod tests { let store = KeystoreInner::open( temp_dir.path(), Some(FromStr::from_str(password.as_str()).unwrap()), - ).unwrap(); + ) + .unwrap(); assert_eq!( pair.public(), store.key_pair::(&pair.public()).unwrap().unwrap().public(), @@ -667,9 +660,15 @@ mod tests { let mut keys = Vec::new(); for i in 0..10 { keys.push(store.generate::().unwrap().public()); - keys.push(store.insert_ephemeral_from_seed::( - &format!("0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", i), - ).unwrap().public()); + keys.push( + store + .insert_ephemeral_from_seed::(&format!( + "0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", + i + )) + .unwrap() + .public(), + ); } // Generate a key of a different type @@ -690,16 +689,14 @@ mod tests { let secret_uri = "//Alice"; let key_pair = sr25519::AppPair::from_string(secret_uri, None).expect("Generates key pair"); - store.insert_unknown( - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + store + .insert_unknown(SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); - let store_key_pair = store.key_pair_by_type::( - &key_pair.public(), - SR25519, - ).expect("Gets key pair from keystore").unwrap(); + let store_key_pair = store + .key_pair_by_type::(&key_pair.public(), SR25519) + .expect("Gets key pair from keystore") + .unwrap(); assert_eq!(key_pair.public(), store_key_pair.public()); } @@ -712,16 +709,15 @@ mod tests { let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); fs::write(file_name, "test").expect("Invalid file is written"); - assert!( - SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty(), - ); + assert!(SyncCryptoStore::sr25519_public_keys(&store, SR25519).is_empty(),); } #[test] fn generate_with_seed_is_not_stored() { let temp_dir = TempDir::new().unwrap(); let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); - let _alice_tmp_key = SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); + let _alice_tmp_key = + SyncCryptoStore::sr25519_generate_new(&store, TEST_KEY_TYPE, Some("//Alice")).unwrap(); assert_eq!(SyncCryptoStore::sr25519_public_keys(&store, TEST_KEY_TYPE).len(), 1); diff --git a/substrate/client/light/src/backend.rs b/substrate/client/light/src/backend.rs index 425720c1d7770e1d7796f876b5eeb10a26dd5efd..87d7dba3ddfb1e4965a8d5af78b4a2a9155279a8 100644 --- a/substrate/client/light/src/backend.rs +++ b/substrate/client/light/src/backend.rs @@ -19,38 +19,44 @@ //! Light client backend. Only stores headers and justifications of blocks. //! Everything else is requested from full nodes on demand. 
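//! An illustrative consequence of that split (editor's sketch; the `backend`
//! value and error handling are assumptions, not code from this change):
//!
//! ```ignore
//! // Only the genesis state is held locally...
//! let state = backend.state_at(BlockId::Number(0u32.into()))?;
//! // ...any later block is answered with `ClientError::NotAvailableOnLightClient`.
//! assert!(backend.state_at(BlockId::Number(1u32.into())).is_err());
//! ```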
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use codec::{Decode, Encode}; -use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_core::offchain::storage::InMemOffchainStorage; -use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, IndexOperation, -}; -use sp_runtime::{generic::BlockId, Justification, Justifications, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use super::blockchain::Blockchain; +use hash_db::Hasher; use sc_client_api::{ backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - PrunableStateChangesTrieStorage, - }, - blockchain::{ - HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, + AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, + PrunableStateChangesTrieStorage, RemoteBackend, }, - light::Storage as BlockchainStorage, + blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, in_mem::check_genesis_storage, + light::Storage as BlockchainStorage, UsageInfo, }; -use super::blockchain::Blockchain; -use hash_db::Hasher; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::{ + offchain::storage::InMemOffchainStorage, + storage::{well_known_keys, ChildInfo}, + ChangesTrieConfiguration, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, + Justification, Justifications, Storage, +}; +use sp_state_machine::{ + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + IndexOperation, StorageCollection, TrieBackend, +}; -const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; +const IN_MEMORY_EXPECT_PROOF: &str = + "InMemory state backend has Void error type and always succeeds; qed"; /// Light client backend. pub struct Backend { @@ -84,11 +90,7 @@ pub enum GenesisOrUnavailableState { impl Backend { /// Create new light backend. pub fn new(blockchain: Arc>) -> Self { - Self { - blockchain, - genesis_state: RwLock::new(None), - import_lock: Default::default(), - } + Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } } /// Get shared blockchain reference. 
@@ -102,9 +104,13 @@ impl AuxStore for Backend { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { self.blockchain.storage().insert_aux(insert, delete) } @@ -114,10 +120,10 @@ impl AuxStore for Backend { } impl ClientBackend for Backend> - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + Block::Hash: Ord, { type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; @@ -141,15 +147,12 @@ impl ClientBackend for Backend> fn begin_state_operation( &self, _operation: &mut Self::BlockImportOperation, - _block: BlockId + _block: BlockId, ) -> ClientResult<()> { Ok(()) } - fn commit_operation( - &self, - mut operation: Self::BlockImportOperation, - ) -> ClientResult<()> { + fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { if !operation.finalized_blocks.is_empty() { for block in operation.finalized_blocks { self.blockchain.storage().finalize_header(block)?; @@ -159,7 +162,9 @@ impl ClientBackend for Backend> if let Some(header) = operation.header { let is_genesis_import = header.number().is_zero(); if let Some(new_config) = operation.changes_trie_config_update { - operation.cache.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); + operation + .cache + .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); } self.blockchain.storage().import_header( header, @@ -175,11 +180,12 @@ impl ClientBackend for Backend> } else { for (key, maybe_val) in operation.aux_ops { match maybe_val { - Some(val) => self.blockchain.storage().insert_aux( - &[(&key[..], &val[..])], - std::iter::empty(), - )?, - None => self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, + Some(val) => self + .blockchain + .storage() + .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, + None => + self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, } } } @@ -229,7 +235,7 @@ impl ClientBackend for Backend> // special case for genesis block if block_number.is_zero() { if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)); + return Ok(GenesisOrUnavailableState::Genesis(genesis_state)) } } @@ -246,10 +252,7 @@ impl ClientBackend for Backend> Err(ClientError::NotAvailableOnLightClient) } - fn remove_leaf_block( - &self, - _hash: &Block::Hash, - ) -> ClientResult<()> { + fn remove_leaf_block(&self, _hash: &Block::Hash) -> ClientResult<()> { Err(ClientError::NotAvailableOnLightClient) } @@ -265,8 +268,9 @@ where Block::Hash: Ord, { fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() - && self.blockchain.expect_block_number_from_id(block) + self.genesis_state.read().is_some() && + self.blockchain + .expect_block_number_from_id(block) .map(|num| num.is_zero()) .unwrap_or(false) } @@ -277,10 +281,10 @@ where } impl BlockImportOperation for ImportOperation - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + Block::Hash: Ord, { type State = GenesisOrUnavailableState>; @@ -326,10 +330,14 @@ impl BlockImportOperation for ImportOperation check_genesis_storage(&input)?; // changes trie configuration - let changes_trie_config = input.top.iter() + let changes_trie_config = input + .top + .iter() 
.find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis")); + .map(|(_, v)| { + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis") + }); self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck @@ -337,7 +345,8 @@ impl BlockImportOperation for ImportOperation storage.insert(None, input.top); // create a list of children keys to re-compute roots for - let child_delta = input.children_default + let child_delta = input + .children_default .iter() .map(|(_storage_key, storage_child)| (&storage_child.child_info, std::iter::empty())); @@ -360,7 +369,8 @@ impl BlockImportOperation for ImportOperation } fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> + where + I: IntoIterator, Option>)>, { self.aux_ops.append(&mut ops.into_iter().collect()); Ok(()) @@ -389,7 +399,10 @@ impl BlockImportOperation for ImportOperation Ok(()) } - fn update_transaction_index(&mut self, _index: Vec) -> sp_blockchain::Result<()> { + fn update_transaction_index( + &mut self, + _index: Vec, + ) -> sp_blockchain::Result<()> { // noop for the light client Ok(()) } @@ -405,8 +418,8 @@ impl std::fmt::Debug for GenesisOrUnavailableState { } impl StateBackend for GenesisOrUnavailableState - where - H::Out: Ord + codec::Codec, +where + H::Out: Ord + codec::Codec, { type Error = ClientError; type Transaction = as StateBackend>::Transaction; @@ -420,11 +433,7 @@ impl StateBackend for GenesisOrUnavailableState } } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> ClientResult>> { + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), @@ -446,24 +455,24 @@ impl StateBackend for GenesisOrUnavailableState key: &[u8], ) -> Result>, Self::Error> { match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok( - state.next_child_storage_key(child_info, key) - .expect(IN_MEMORY_EXPECT_PROOF) - ), + GenesisOrUnavailableState::Genesis(ref state) => + Ok(state.next_child_storage_key(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_keys_with_prefix(prefix, action), + GenesisOrUnavailableState::Genesis(ref state) => + state.for_keys_with_prefix(prefix, action), GenesisOrUnavailableState::Unavailable => (), } } fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_key_values_with_prefix(prefix, action), + GenesisOrUnavailableState::Genesis(ref state) => + state.for_key_values_with_prefix(prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -477,9 +486,9 @@ impl StateBackend for GenesisOrUnavailableState allow_missing: bool, ) -> ClientResult { match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) - .expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Genesis(ref state) => Ok(state + .apply_to_key_values_while(child_info, 
prefix, start_at, action, allow_missing) + .expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -512,11 +521,13 @@ impl StateBackend for GenesisOrUnavailableState fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta), + GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -524,15 +535,17 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { match *self { GenesisOrUnavailableState::Genesis(ref state) => { let (root, is_equal, _) = state.child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, - GenesisOrUnavailableState::Unavailable => - (H::Out::default(), true, Default::default()), + GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), } } @@ -550,7 +563,7 @@ impl StateBackend for GenesisOrUnavailableState } } - fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) {} fn usage_info(&self) -> sp_state_machine::UsageInfo { sp_state_machine::UsageInfo::empty() diff --git a/substrate/client/light/src/blockchain.rs b/substrate/client/light/src/blockchain.rs index 242839833a541b68b31d3412c359111377384dcf..e88c724193697280a64562f53ece901c1cf180f3 100644 --- a/substrate/client/light/src/blockchain.rs +++ b/substrate/client/light/src/blockchain.rs @@ -21,27 +21,25 @@ use std::sync::Arc; -use sp_runtime::{Justifications, generic::BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; - -use sp_blockchain::{ - HeaderMetadata, CachedHeaderMetadata, Error as ClientError, Result as ClientResult, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, + Justifications, }; + +use crate::fetcher::RemoteHeaderRequest; pub use sc_client_api::{ - backend::{ - AuxStore, NewBlockState, ProvideChtRoots, - }, + backend::{AuxStore, NewBlockState, ProvideChtRoots}, blockchain::{ - Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, + well_known_cache_keys, Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - well_known_cache_keys, - }, - light::{ - RemoteBlockchain, LocalOrRemote, Storage }, cht, + light::{LocalOrRemote, RemoteBlockchain, Storage}, +}; +use sp_blockchain::{ + CachedHeaderMetadata, Error as ClientError, HeaderMetadata, Result as ClientResult, }; -use crate::fetcher::RemoteHeaderRequest; /// Light client blockchain. pub struct Blockchain { @@ -51,9 +49,7 @@ pub struct Blockchain { impl Blockchain { /// Create new light blockchain backed with given storage. pub fn new(storage: S) -> Self { - Self { - storage, - } + Self { storage } } /// Get storage reference. 
@@ -62,7 +58,11 @@ impl Blockchain { } } -impl BlockchainHeaderBackend for Blockchain where Block: BlockT, S: Storage { +impl BlockchainHeaderBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ fn header(&self, id: BlockId) -> ClientResult> { match RemoteBlockchain::header(self, id)? { LocalOrRemote::Local(header) => Ok(Some(header)), @@ -83,15 +83,25 @@ impl BlockchainHeaderBackend for Blockchain where Block: Blo self.storage.number(hash) } - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { self.storage.hash(number) } } -impl HeaderMetadata for Blockchain where Block: BlockT, S: Storage { +impl HeaderMetadata for Blockchain +where + Block: BlockT, + S: Storage, +{ type Error = ClientError; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { self.storage.header_metadata(hash) } @@ -104,7 +114,11 @@ impl HeaderMetadata for Blockchain where Block: BlockT, S: S } } -impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage { +impl BlockchainBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ fn body(&self, _id: BlockId) -> ClientResult>> { Err(ClientError::NotAvailableOnLightClient) } @@ -129,16 +143,13 @@ impl BlockchainBackend for Blockchain where Block: BlockT, S Err(ClientError::NotAvailableOnLightClient) } - fn indexed_transaction( - &self, - _hash: &Block::Hash, - ) -> ClientResult>> { + fn indexed_transaction(&self, _hash: &Block::Hash) -> ClientResult>> { Err(ClientError::NotAvailableOnLightClient) } fn block_indexed_body( &self, - _id: BlockId + _id: BlockId, ) -> sp_blockchain::Result>>> { Err(ClientError::NotAvailableOnLightClient) } @@ -151,16 +162,16 @@ impl, Block: BlockT> ProvideCache for Blockchain { } impl RemoteBlockchain for Blockchain - where - S: Storage, +where + S: Storage, { - fn header(&self, id: BlockId) -> ClientResult, - >> { + fn header( + &self, + id: BlockId, + ) -> ClientResult>> { // first, try to read header from local storage if let Some(local_header) = self.storage.header(id)? { - return Ok(LocalOrRemote::Local(local_header)); + return Ok(LocalOrRemote::Local(local_header)) } // we need to know block number to check if it's a part of CHT @@ -173,8 +184,9 @@ impl RemoteBlockchain for Blockchain }; // if the header is genesis (never pruned), non-canonical, or from future => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown { - return Ok(LocalOrRemote::Unknown); + if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown + { + return Ok(LocalOrRemote::Unknown) } Ok(LocalOrRemote::Remote(RemoteHeaderRequest { diff --git a/substrate/client/light/src/call_executor.rs b/substrate/client/light/src/call_executor.rs index c9ca3bab37bef195c70d5175a5011c44bf91f23e..f666d8363127ff45b1e2fda65ae47c2aa6604ba0 100644 --- a/substrate/client/light/src/call_executor.rs +++ b/substrate/client/light/src/call_executor.rs @@ -18,34 +18,33 @@ //! Methods that light client could use to execute runtime calls. 
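//! An illustrative sketch of that behaviour (editor's example; the executor
//! setup, call arguments and strategy value are assumptions): calls are only
//! answered locally when they target the genesis state, anything else must be
//! verified against a full node via `prove_execution` / execution proofs.
//!
//! ```ignore
//! // Genesis-state calls are served locally by `GenesisCallExecutor`...
//! let version = executor.runtime_version(&BlockId::Number(0u32.into()))?;
//! // ...while a call at a later block fails on a light client.
//! let res = executor.call(
//!     &BlockId::Number(1u32.into()),
//!     "Core_version",
//!     &[],
//!     ExecutionStrategy::NativeElseWasm,
//!     None,
//! );
//! assert!(res.is_err());
//! ```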
-use std::{ - sync::Arc, panic::UnwindSafe, result, cell::RefCell, -}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use hash_db::Hasher; use sp_core::{ - convert_hash, NativeOrEncoded, traits::{CodeExecutor, SpawnNamed}, + convert_hash, + traits::{CodeExecutor, SpawnNamed}, + NativeOrEncoded, }; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, Header as HeaderT, HashFor}, + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT}, }; -use sp_externalities::Extensions; use sp_state_machine::{ - self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, + self, create_proof_check_backend, execution_proof_check_on_trie_backend, + Backend as StateBackend, ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof, }; -use hash_db::Hasher; use sp_api::{ProofRecorder, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::RemoteBackend, - light::RemoteCallRequest, - call_executor::CallExecutor, + backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, }; -use sc_executor::{RuntimeVersion, NativeVersion}; +use sc_executor::{NativeVersion, RuntimeVersion}; /// Call executor that is able to execute calls only on genesis state. /// @@ -64,19 +63,15 @@ impl GenesisCallExecutor { impl Clone for GenesisCallExecutor { fn clone(&self) -> Self { - GenesisCallExecutor { - backend: self.backend.clone(), - local: self.local.clone(), - } + GenesisCallExecutor { backend: self.backend.clone(), local: self.local.clone() } } } -impl CallExecutor for - GenesisCallExecutor - where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, +impl CallExecutor for GenesisCallExecutor +where + Block: BlockT, + B: RemoteBackend, + Local: CallExecutor, { type Error = ClientError; @@ -99,7 +94,7 @@ impl CallExecutor for fn contextual_call< EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, @@ -114,7 +109,10 @@ impl CallExecutor for native_call: Option, recorder: &Option>, extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { + ) -> ClientResult> + where + ExecutionManager: Clone, + { // there's no actual way/need to specify native/wasm execution strategy on light node // => we can safely ignore passed values @@ -125,7 +123,7 @@ impl CallExecutor for Result, Local::Error>, ) -> Result, Local::Error>, _, - NC + NC, >( &self.local, at, @@ -137,7 +135,8 @@ impl CallExecutor for native_call, recorder, extensions, - ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), + ) + .map_err(|e| ClientError::Execution(Box::new(e.to_string()))), false => Err(ClientError::NotAvailableOnLightClient), } } @@ -174,24 +173,19 @@ pub fn prove_execution( method: &str, call_data: &[u8], ) -> ClientResult<(Vec, StorageProof)> - where - Block: BlockT, - S: StateBackend>, - E: CallExecutor, +where + Block: BlockT, + S: StateBackend>, + E: CallExecutor, { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as - Box - )?; + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; // 
execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state( - &trie_state, - &mut Default::default(), - method, - call_data, - )?; + let (result, exec_proof) = + executor.prove_at_trie_state(&trie_state, &mut Default::default(), method, call_data)?; Ok((result, exec_proof)) } @@ -205,11 +199,11 @@ pub fn check_execution_proof( request: &RemoteCallRequest
<Header>
, remote_proof: StorageProof, ) -> ClientResult> - where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, +where + Header: HeaderT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, { let local_state_root = request.header.state_root(); let root: H::Out = convert_hash(&local_state_root); @@ -220,7 +214,8 @@ pub fn check_execution_proof( // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code.runtime_code() + let runtime_code = backend_runtime_code + .runtime_code() .map_err(|_e| ClientError::RuntimeCodeMissing)?; // execute method diff --git a/substrate/client/light/src/fetcher.rs b/substrate/client/light/src/fetcher.rs index e39cfe07fbf5ea46a480fc55c2626cb304108713..fcdc7ad7ba59645dabd6ae1d2da493369da7faba 100644 --- a/substrate/client/light/src/fetcher.rs +++ b/substrate/client/light/src/fetcher.rs @@ -18,34 +18,39 @@ //! Light client data fetcher. Fetches requested data from remote full nodes. -use std::sync::Arc; -use std::collections::{BTreeMap, HashMap}; -use std::marker::PhantomData; +use std::{ + collections::{BTreeMap, HashMap}, + marker::PhantomData, + sync::Arc, +}; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; -use sp_core::{convert_hash, traits::{CodeExecutor, SpawnNamed}, storage::{ChildInfo, ChildType}}; +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::{ + convert_hash, + storage::{ChildInfo, ChildType}, + traits::{CodeExecutor, SpawnNamed}, +}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - AtLeast32Bit, CheckedConversion, + AtLeast32Bit, Block as BlockT, CheckedConversion, Hash, HashFor, Header as HeaderT, NumberFor, }; +pub use sp_state_machine::StorageProof; use sp_state_machine::{ - ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, - InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, + key_changes_proof_check_with_db, read_child_proof_check, read_proof_check, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + InMemoryChangesTrieStorage, TrieBackend, }; -pub use sp_state_machine::StorageProof; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; pub use sc_client_api::{ + cht, light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, Storage as BlockchainStorage, }, - cht, }; -use crate::{blockchain::Blockchain, call_executor::check_execution_proof}; /// Remote data checker. pub struct LightDataChecker> { @@ -62,9 +67,7 @@ impl> LightDataChecker { executor: E, spawn_handle: Box, ) -> Self { - Self { - blockchain, executor, spawn_handle, _hasher: PhantomData - } + Self { blockchain, executor, spawn_handle, _hasher: PhantomData } } /// Check remote changes query proof assuming that CHT-s are of given size. 
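The constructor above keeps the field-init shorthand; note the `_hasher: PhantomData` member, which exists only to tie the otherwise-unused hasher type parameter to the struct. A standalone sketch of that pattern, under hypothetical names:

use std::marker::PhantomData;

// `H` appears only in trait bounds on the impl blocks, so a zero-sized
// marker field is needed to make the struct generic over it.
struct Checker<H> {
	_hasher: PhantomData<H>,
}

impl<H> Checker<H> {
	fn new() -> Self {
		Self { _hasher: PhantomData }
	}
}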
@@ -74,26 +77,39 @@ impl> LightDataChecker { remote_proof: ChangesProof, cht_size: NumberFor, ) -> ClientResult, u32)>> - where - H: Hasher, - H::Out: Ord + codec::Codec, + where + H: Hasher, + H::Out: Ord + codec::Codec, { // since we need roots of all changes tries for the range begin..max // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { + if remote_proof.max_block > request.max_block.0 || + remote_proof.max_block < request.last_block.0 + { return Err(ClientError::ChangesTrieAccessFailed(format!( "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", - remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, - )).into()); + remote_proof.max_block, + request.first_block.0, + request.last_block.0, + request.max_block.0, + )) + .into()) } // check if remote node has responded with extra changes trie roots proofs // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof.roots.keys().next() - .map(|first_root| *first_root < request.first_block.0 - || *first_root >= request.tries_roots.0) + let is_extra_first_root = remote_proof + .roots + .keys() + .next() + .map(|first_root| { + *first_root < request.first_block.0 || *first_root >= request.tries_roots.0 + }) .unwrap_or(false); - let is_extra_last_root = remote_proof.roots.keys().next_back() + let is_extra_last_root = remote_proof + .roots + .keys() + .next_back() .map(|last_root| *last_root >= request.tries_roots.0) .unwrap_or(false); if is_extra_first_root || is_extra_last_root { @@ -112,11 +128,7 @@ impl> LightDataChecker { let remote_roots_proof = remote_proof.roots_proof; let remote_proof = remote_proof.proof; if !remote_roots.is_empty() { - self.check_changes_tries_proof( - cht_size, - &remote_roots, - remote_roots_proof, - )?; + self.check_changes_tries_proof(cht_size, &remote_roots, remote_roots_proof)?; } // and now check the key changes proof + get the changes @@ -125,7 +137,10 @@ impl> LightDataChecker { for config_range in &request.changes_trie_configs { let result_range = key_changes_proof_check_with_db::( ChangesTrieConfigurationRange { - config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, + config: config_range + .config + .as_ref() + .ok_or(ClientError::ChangesTriesNotSupported)?, zero: config_range.zero.0, end: config_range.end.map(|(n, _)| n), }, @@ -141,7 +156,8 @@ impl> LightDataChecker { }, remote_max_block, request.storage_key.as_ref(), - &request.key) + &request.key, + ) .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; result.extend(result_range); } @@ -156,9 +172,9 @@ impl> LightDataChecker { remote_roots: &BTreeMap, B::Hash>, remote_roots_proof: StorageProof, ) -> ClientResult<()> - where - H: Hasher, - H::Out: Ord + codec::Codec, + where + H: Hasher, + H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage let storage = remote_roots_proof.into_memory_db(); @@ -166,52 +182,62 @@ impl> LightDataChecker { // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> 
replaced with CHT) - let first_block = cht_blocks.first().cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)? - .ok_or(ClientError::InvalidCHTProof)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - let mut cht_root = H::Out::default(); - cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); - if !storage.contains(&cht_root, EMPTY_PREFIX) { - return Err(ClientError::InvalidCHTProof.into()); + cht::for_each_cht_group::( + cht_size, + blocks, + |mut storage, _, cht_blocks| { + // get local changes trie CHT root for given CHT + // it should be there, because it is never pruned AND request has been composed + // when required header has been pruned (=> replaced with CHT) + let first_block = cht_blocks + .first() + .cloned() + .expect("for_each_cht_group never calls callback with empty groups"); + let local_cht_root = self + .blockchain + .storage() + .changes_trie_cht_root(cht_size, first_block)? + .ok_or(ClientError::InvalidCHTProof)?; + + // check changes trie root for every block within CHT range + for block in cht_blocks { + // check if the proofs storage contains the root + // normally this happens in when the proving backend is created, but since + // we share the storage for multiple checks, do it here + let mut cht_root = H::Out::default(); + cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); + if !storage.contains(&cht_root, EMPTY_PREFIX) { + return Err(ClientError::InvalidCHTProof.into()) + } + + // check proof for single changes trie root + let proving_backend = TrieBackend::new(storage, cht_root); + let remote_changes_trie_root = remote_roots[&block]; + cht::check_proof_on_proving_backend::( + local_cht_root, + block, + remote_changes_trie_root, + &proving_backend, + )?; + + // and return the storage to use in following checks + storage = proving_backend.into_storage(); } - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend, - )?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, storage) + Ok(storage) + }, + storage, + ) } } impl FetchChecker for LightDataChecker - where - Block: BlockT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, - S: BlockchainStorage, +where + Block: BlockT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, + S: BlockchainStorage, { fn check_header_proof( &self, @@ -219,15 +245,16 @@ impl FetchChecker for LightDataChecker remote_header: Option, remote_proof: StorageProof, ) -> ClientResult { - let remote_header = remote_header.ok_or_else(|| - ClientError::from(ClientError::InvalidCHTProof))?; + let remote_header = + remote_header.ok_or_else(|| ClientError::from(ClientError::InvalidCHTProof))?; let remote_header_hash = remote_header.hash(); cht::check_proof::( request.cht_root, request.block, remote_header_hash, remote_proof, - ).map(|_| remote_header) + ) + .map(|_| remote_header) } fn check_read_proof( @@ -239,7 
+266,8 @@ impl FetchChecker for LightDataChecker convert_hash(request.header.state_root()), remote_proof, request.keys.iter(), - ).map_err(|e| ClientError::from(e)) + ) + .map_err(|e| ClientError::from(e)) } fn check_read_child_proof( @@ -256,7 +284,8 @@ impl FetchChecker for LightDataChecker remote_proof, &child_info, request.keys.iter(), - ).map_err(|e| ClientError::from(e)) + ) + .map_err(|e| ClientError::from(e)) } fn check_execution_proof( @@ -275,7 +304,7 @@ impl FetchChecker for LightDataChecker fn check_changes_proof( &self, request: &RemoteChangesRequest, - remote_proof: ChangesProof + remote_proof: ChangesProof, ) -> ClientResult, u32)>> { self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) } @@ -283,12 +312,11 @@ impl FetchChecker for LightDataChecker fn check_body_proof( &self, request: &RemoteBodyRequest, - body: Vec + body: Vec, ) -> ClientResult> { // TODO: #2621 - let extrinsics_root = HashFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - ); + let extrinsics_root = + HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); if *request.header.extrinsics_root() == extrinsics_root { Ok(body) } else { @@ -297,7 +325,6 @@ impl FetchChecker for LightDataChecker expected: extrinsics_root.to_string(), }) } - } } @@ -308,10 +335,18 @@ struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> - where - H: Hasher, - Number: std::fmt::Display + std::hash::Hash + Clone + AtLeast32Bit + Encode + Decode + Send + Sync + 'static, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, +where + H: Hasher, + Number: std::fmt::Display + + std::hash::Hash + + Clone + + AtLeast32Bit + + Encode + + Decode + + Send + + Sync + + 'static, + Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, { fn build_anchor( &self, @@ -329,7 +364,8 @@ impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a let root = if block < self.roots.0 { self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() } else { - let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); + let index: Option = + block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); index.and_then(|index| self.roots.1.get(index as usize).cloned()) }; diff --git a/substrate/client/light/src/lib.rs b/substrate/client/light/src/lib.rs index e647b8743cc0f8865a84b793f9f6fc621dfda8a9..ed48c05258d0ffd179a6b8cbeee2bfe789645aa0 100644 --- a/substrate/client/light/src/lib.rs +++ b/substrate/client/light/src/lib.rs @@ -18,16 +18,19 @@ //! Light client components. +use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_runtime::traits::{Block as BlockT, HashFor}; use std::sync::Arc; -use sp_core::traits::{CodeExecutor, SpawnNamed}; pub mod backend; pub mod blockchain; pub mod call_executor; pub mod fetcher; -pub use {backend::*, blockchain::*, call_executor::*, fetcher::*}; +pub use backend::*; +pub use blockchain::*; +pub use call_executor::*; +pub use fetcher::*; /// Create an instance of fetch data checker. pub fn new_fetch_checker>( @@ -35,8 +38,8 @@ pub fn new_fetch_checker>( executor: E, spawn_handle: Box, ) -> LightDataChecker, B, S> - where - E: CodeExecutor, +where + E: CodeExecutor, { LightDataChecker::new(blockchain, executor, spawn_handle) } @@ -48,9 +51,9 @@ pub fn new_light_blockchain>(storage: S) -> A /// Create an instance of light client backend. 
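`new_light_backend`, whose body follows, completes this module's set of construction helpers. A sketch of wiring the three together — `storage`, `executor`, and `spawn_handle` are assumed inputs, and the item imports from this crate are omitted:

fn build_light_parts<B, S, E>(storage: S, executor: E, spawn_handle: Box<dyn SpawnNamed>)
where
	B: BlockT,
	S: BlockchainStorage<B> + 'static,
	E: CodeExecutor + Clone + 'static,
{
	// Shared light blockchain over the given storage.
	let blockchain = new_light_blockchain(storage);
	// Proof checker for data fetched from full nodes.
	let _checker: LightDataChecker<HashFor<B>, B, S> =
		new_fetch_checker(blockchain.clone(), executor, spawn_handle);
	// Backend that answers from genesis state and errors otherwise.
	let _backend = new_light_backend(blockchain);
}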
pub fn new_light_backend(blockchain: Arc>) -> Arc>> - where - B: BlockT, - S: BlockchainStorage, +where + B: BlockT, + S: BlockchainStorage, { Arc::new(Backend::new(blockchain)) } diff --git a/substrate/client/network-gossip/src/bridge.rs b/substrate/client/network-gossip/src/bridge.rs index fd9aac96c01026d34dd846c11c4654f2c8bf8581..9871b7efb39af268f9883b2c711a65528c0fcd3e 100644 --- a/substrate/client/network-gossip/src/bridge.rs +++ b/substrate/client/network-gossip/src/bridge.rs @@ -16,13 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{Network, Validator}; -use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; +use crate::{ + state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}, + Network, Validator, +}; use sc_network::{Event, ReputationChange}; -use futures::prelude::*; -use futures::channel::mpsc::{channel, Sender, Receiver}; +use futures::{ + channel::mpsc::{channel, Receiver, Sender}, + prelude::*, +}; use libp2p::PeerId; use log::trace; use prometheus_endpoint::Registry; @@ -74,7 +78,10 @@ impl GossipEngine { protocol: impl Into>, validator: Arc>, metrics_registry: Option<&Registry>, - ) -> Self where B: 'static { + ) -> Self + where + B: 'static, + { let protocol = protocol.into(); let network_event_stream = network.event_stream(); @@ -99,11 +106,7 @@ impl GossipEngine { /// the message's topic. No validation is performed on the message, if the /// message is already expired it should be dropped on the next garbage /// collection. - pub fn register_gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { + pub fn register_gossip_message(&mut self, topic: B::Hash, message: Vec) { self.state_machine.register_message(topic, message); } @@ -113,9 +116,7 @@ impl GossipEngine { } /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). - pub fn messages_for(&mut self, topic: B::Hash) - -> Receiver - { + pub fn messages_for(&mut self, topic: B::Hash) -> Receiver { let past_messages = self.state_machine.messages_for(topic).collect::>(); // The channel length is not critical for correctness. By the implementation of `channel` // each sender is guaranteed a single buffer slot, making it a non-rendezvous channel and @@ -124,7 +125,7 @@ impl GossipEngine { // contains a single message. let (mut tx, rx) = channel(usize::max(past_messages.len(), 10)); - for notification in past_messages{ + for notification in past_messages { tx.try_send(notification) .expect("receiver known to be live, and buffer size known to suffice; qed"); } @@ -135,22 +136,12 @@ impl GossipEngine { } /// Send all messages with given topic to a peer. - pub fn send_topic( - &mut self, - who: &PeerId, - topic: B::Hash, - force: bool - ) { + pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { self.state_machine.send_topic(&mut *self.network, who, topic, force) } /// Multicast a message to all peers. 
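The buffer-size reasoning in `messages_for` above generalizes: size the channel to cover the backlog, and the initial `try_send` loop becomes infallible. A stripped-down sketch of the same rule:

use futures::channel::mpsc::{channel, Receiver};

fn seed_subscriber(past_messages: Vec<Vec<u8>>) -> Receiver<Vec<u8>> {
	// Capacity covers the whole backlog (with the same floor of 10 used
	// above), so pushing the already-known messages cannot fail.
	let (mut tx, rx) = channel(usize::max(past_messages.len(), 10));
	for message in past_messages {
		tx.try_send(message).expect("buffer sized to fit backlog; qed");
	}
	rx
}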
- pub fn gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - force: bool, - ) { + pub fn gossip_message(&mut self, topic: B::Hash, message: Vec, force: bool) { self.state_machine.multicast(&mut *self.network, topic, message, force) } @@ -184,30 +175,33 @@ impl Future for GossipEngine { Poll::Ready(Some(event)) => match event { Event::SyncConnected { remote } => { this.network.add_set_reserved(remote, this.protocol.clone()); - } + }, Event::SyncDisconnected { remote } => { this.network.remove_set_reserved(remote, this.protocol.clone()); - } + }, Event::NotificationStreamOpened { remote, protocol, role, .. } => { if protocol != this.protocol { - continue; + continue } this.state_machine.new_peer(&mut *this.network, remote, role); - } + }, Event::NotificationStreamClosed { remote, protocol } => { if protocol != this.protocol { - continue; + continue } this.state_machine.peer_disconnected(&mut *this.network, remote); }, Event::NotificationsReceived { remote, messages } => { - let messages = messages.into_iter().filter_map(|(engine, data)| { - if engine == this.protocol { - Some(data.to_vec()) - } else { - None - } - }).collect(); + let messages = messages + .into_iter() + .filter_map(|(engine, data)| { + if engine == this.protocol { + Some(data.to_vec()) + } else { + None + } + }) + .collect(); let to_forward = this.state_machine.on_incoming( &mut *this.network, @@ -217,27 +211,25 @@ impl Future for GossipEngine { this.forwarding_state = ForwardingState::Busy(to_forward.into()); }, - Event::Dht(_) => {} - } + Event::Dht(_) => {}, + }, // The network event stream closed. Do the same for [`GossipValidator`]. Poll::Ready(None) => return Poll::Ready(()), Poll::Pending => break, } - } + }, ForwardingState::Busy(to_forward) => { let (topic, notification) = match to_forward.pop_front() { Some(n) => n, None => { this.forwarding_state = ForwardingState::Idle; - continue; - } + continue + }, }; let sinks = match this.message_sinks.get_mut(&topic) { Some(sinks) => sinks, - None => { - continue; - }, + None => continue, }; // Make sure all sinks for the given topic are ready. @@ -249,8 +241,8 @@ impl Future for GossipEngine { Poll::Pending => { // Push back onto queue for later. to_forward.push_front((topic, notification)); - break 'outer; - } + break 'outer + }, } } @@ -259,7 +251,7 @@ impl Future for GossipEngine { if sinks.is_empty() { this.message_sinks.remove(&topic); - continue; + continue } trace!( @@ -271,18 +263,16 @@ impl Future for GossipEngine { for sink in sinks { match sink.start_send(notification.clone()) { Ok(()) => {}, - Err(e) if e.is_full() => unreachable!( - "Previously ensured that all sinks are ready; qed.", - ), + Err(e) if e.is_full() => + unreachable!("Previously ensured that all sinks are ready; qed.",), // Receiver got dropped. Will be removed in next iteration (See (1)). 
Err(_) => {}, } } - } + }, } } - while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); this.state_machine.tick(&mut *this.network); @@ -299,17 +289,23 @@ impl Future for GossipEngine { #[cfg(test)] mod tests { - use async_std::task::spawn; + use super::*; use crate::{ValidationResult, ValidatorContext}; - use futures::{channel::mpsc::{unbounded, UnboundedSender}, executor::{block_on, block_on_stream}, future::poll_fn}; + use async_std::task::spawn; + use futures::{ + channel::mpsc::{unbounded, UnboundedSender}, + executor::{block_on, block_on_stream}, + future::poll_fn, + }; use quickcheck::{Arbitrary, Gen, QuickCheck}; use sc_network::ObservedRole; - use sp_runtime::{testing::H256, traits::{Block as BlockT}}; - use std::borrow::Cow; - use std::convert::TryInto; - use std::sync::{Arc, Mutex}; + use sp_runtime::{testing::H256, traits::Block as BlockT}; + use std::{ + borrow::Cow, + convert::TryInto, + sync::{Arc, Mutex}, + }; use substrate_test_runtime_client::runtime::Block; - use super::*; #[derive(Clone, Default)] struct TestNetwork { @@ -329,18 +325,15 @@ mod tests { Box::pin(rx) } - fn report_peer(&self, _: PeerId, _: ReputationChange) { - } + fn report_peer(&self, _: PeerId, _: ReputationChange) {} fn disconnect_peer(&self, _: PeerId, _: Cow<'static, str>) { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { - } + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { - } + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); @@ -405,32 +398,32 @@ mod tests { None, ); - let mut event_sender = network.inner.lock() - .unwrap() - .event_senders - .pop() - .unwrap(); + let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. - event_sender.start_send( - Event::NotificationStreamOpened { + event_sender + .start_send(Event::NotificationStreamOpened { remote: remote_peer.clone(), protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, - } - ).expect("Event stream is unbounded; qed."); + }) + .expect("Event stream is unbounded; qed."); let messages = vec![vec![1], vec![2]]; - let events = messages.iter().cloned().map(|m| { - Event::NotificationsReceived { + let events = messages + .iter() + .cloned() + .map(|m| Event::NotificationsReceived { remote: remote_peer.clone(), - messages: vec![(protocol.clone(), m.into())] - } - }).collect::>(); + messages: vec![(protocol.clone(), m.into())], + }) + .collect::>(); // Send first event before subscribing. - event_sender.start_send(events[0].clone()).expect("Event stream is unbounded; qed."); + event_sender + .start_send(events[0].clone()) + .expect("Event stream is unbounded; qed."); let mut subscribers = vec![]; for _ in 0..2 { @@ -438,13 +431,14 @@ mod tests { } // Send second event after subscribing. - event_sender.start_send(events[1].clone()).expect("Event stream is unbounded; qed."); + event_sender + .start_send(events[1].clone()) + .expect("Event stream is unbounded; qed."); spawn(gossip_engine); - let mut subscribers = subscribers.into_iter() - .map(|s| block_on_stream(s)) - .collect::>(); + let mut subscribers = + subscribers.into_iter().map(|s| block_on_stream(s)).collect::>(); // Expect each subscriber to receive both events. 
for message in messages { @@ -463,7 +457,7 @@ mod tests { #[test] fn forwarding_to_different_size_and_topic_channels() { #[derive(Clone, Debug)] - struct ChannelLengthAndTopic{ + struct ChannelLengthAndTopic { length: usize, topic: H256, } @@ -486,7 +480,7 @@ mod tests { topic: H256, } - impl Arbitrary for Message{ + impl Arbitrary for Message { fn arbitrary(g: &mut Gen) -> Self { let possible_topics = (0..10).collect::>(); Self { @@ -517,13 +511,16 @@ mod tests { let remote_peer = PeerId::random(); let network = TestNetwork::default(); - let num_channels_per_topic = channels.iter() - .fold(HashMap::new(), |mut acc, ChannelLengthAndTopic { topic, .. }| { + let num_channels_per_topic = channels.iter().fold( + HashMap::new(), + |mut acc, ChannelLengthAndTopic { topic, .. }| { acc.entry(topic).and_modify(|e| *e += 1).or_insert(1); acc - }); + }, + ); - let expected_msgs_per_topic_all_chan = notifications.iter() + let expected_msgs_per_topic_all_chan = notifications + .iter() .fold(HashMap::new(), |mut acc, messages| { for message in messages { acc.entry(message.topic).and_modify(|e| *e += 1).or_insert(1); @@ -545,12 +542,12 @@ mod tests { ); // Create channels. - let (txs, mut rxs) = channels.iter() - .map(|ChannelLengthAndTopic { length, topic }| { - (topic.clone(), channel(*length)) - }) + let (txs, mut rxs) = channels + .iter() + .map(|ChannelLengthAndTopic { length, topic }| (topic.clone(), channel(*length))) .fold((vec![], vec![]), |mut acc, (topic, (tx, rx))| { - acc.0.push((topic, tx)); acc.1.push((topic, rx)); + acc.0.push((topic, tx)); + acc.1.push((topic, rx)); acc }); @@ -560,30 +557,27 @@ mod tests { Some(entry) => entry.push(tx), None => { gossip_engine.message_sinks.insert(topic, vec![tx]); - } + }, } } - - let mut event_sender = network.inner.lock() - .unwrap() - .event_senders - .pop() - .unwrap(); + let mut event_sender = network.inner.lock().unwrap().event_senders.pop().unwrap(); // Register the remote peer. - event_sender.start_send( - Event::NotificationStreamOpened { + event_sender + .start_send(Event::NotificationStreamOpened { remote: remote_peer.clone(), protocol: protocol.clone(), negotiated_fallback: None, role: ObservedRole::Authority, - } - ).expect("Event stream is unbounded; qed."); + }) + .expect("Event stream is unbounded; qed."); // Send messages into the network event stream. for (i_notification, messages) in notifications.iter().enumerate() { - let messages = messages.into_iter().enumerate() + let messages = messages + .into_iter() + .enumerate() .map(|(i_message, Message { topic })| { // Embed the topic in the first 256 bytes of the message to be extracted by // the [`TestValidator`] later on. 
@@ -595,12 +589,15 @@ mod tests { message.push(i_message.try_into().unwrap()); (protocol.clone(), message.into()) - }).collect(); - - event_sender.start_send(Event::NotificationsReceived { - remote: remote_peer.clone(), - messages, - }).expect("Event stream is unbounded; qed."); + }) + .collect(); + + event_sender + .start_send(Event::NotificationsReceived { + remote: remote_peer.clone(), + messages, + }) + .expect("Event stream is unbounded; qed."); } let mut received_msgs_per_topic_all_chan = HashMap::::new(); @@ -621,19 +618,19 @@ mod tests { match rx.poll_next_unpin(cx) { Poll::Ready(Some(_)) => { progress = true; - received_msgs_per_topic_all_chan.entry(*topic) + received_msgs_per_topic_all_chan + .entry(*topic) .and_modify(|e| *e += 1) .or_insert(1); }, - Poll::Ready(None) => unreachable!( - "Sender side of channel is never dropped", - ), + Poll::Ready(None) => + unreachable!("Sender side of channel is never dropped",), Poll::Pending => {}, } } if !progress { - break; + break } } Poll::Ready(()) @@ -655,10 +652,10 @@ mod tests { } // Past regressions. - prop(vec![], vec![vec![Message{ topic: H256::default()}]]); + prop(vec![], vec![vec![Message { topic: H256::default() }]]); prop( - vec![ChannelLengthAndTopic {length: 71, topic: H256::default()}], - vec![vec![Message{ topic: H256::default()}]], + vec![ChannelLengthAndTopic { length: 71, topic: H256::default() }], + vec![vec![Message { topic: H256::default() }]], ); QuickCheck::new().quickcheck(prop as fn(_, _)) diff --git a/substrate/client/network-gossip/src/lib.rs b/substrate/client/network-gossip/src/lib.rs index f8b6e8f0c2fdc05ebc2b298b084aa7ce2be0a4c7..45fc19d6ef8acce1f5197df53dd1a2481a6d175e 100644 --- a/substrate/client/network-gossip/src/lib.rs +++ b/substrate/client/network-gossip/src/lib.rs @@ -61,13 +61,15 @@ //! These status packets will typically contain light pieces of information //! used to inform peers of a current view of protocol state. 
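For a concrete picture of the validation hook this module documents, here is a minimal `Validator` implementation that accepts every message and keeps it under a topic derived from the message hash. `AcceptAll` is illustrative; the trait, context, and result types are the re-exports shown below:

use sc_network::PeerId;
use sp_runtime::traits::{Block as BlockT, Hash, HashFor};

struct AcceptAll;

impl<B: BlockT> Validator<B> for AcceptAll {
	fn validate(
		&self,
		_context: &mut dyn ValidatorContext<B>,
		_sender: &PeerId,
		data: &[u8],
	) -> ValidationResult<B::Hash> {
		// Keep and rebroadcast everything; topic = hash of the payload.
		ValidationResult::ProcessAndKeep(HashFor::<B>::hash(data))
	}
}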
-pub use self::bridge::GossipEngine; -pub use self::state_machine::TopicNotification; -pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext, ValidationResult}; +pub use self::{ + bridge::GossipEngine, + state_machine::TopicNotification, + validator::{DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext}, +}; use futures::prelude::*; use sc_network::{multiaddr, Event, ExHashT, NetworkService, PeerId, ReputationChange}; -use sp_runtime::{traits::Block as BlockT}; +use sp_runtime::traits::Block as BlockT; use std::{borrow::Cow, iter, pin::Pin, sync::Arc}; mod bridge; @@ -111,18 +113,23 @@ impl Network for Arc> { } fn add_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - let addr = iter::once(multiaddr::Protocol::P2p(who.into())) - .collect::(); - let result = NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); + let addr = + iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let result = + NetworkService::add_peers_to_reserved_set(self, protocol, iter::once(addr).collect()); if let Err(err) = result { log::error!(target: "gossip", "add_set_reserved failed: {}", err); } } fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - let addr = iter::once(multiaddr::Protocol::P2p(who.into())) - .collect::(); - let result = NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(addr).collect()); + let addr = + iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); + let result = NetworkService::remove_peers_from_reserved_set( + self, + protocol, + iter::once(addr).collect(), + ); if let Err(err) = result { log::error!(target: "gossip", "remove_set_reserved failed: {}", err); } diff --git a/substrate/client/network-gossip/src/state_machine.rs b/substrate/client/network-gossip/src/state_machine.rs index ea1a336585981b7684a2eb7793a1f4eba0d2302f..5cda52b9db493ac45fa51a854f8b64aa7a0e0f59 100644 --- a/substrate/client/network-gossip/src/state_machine.rs +++ b/substrate/client/network-gossip/src/state_machine.rs @@ -16,18 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{Network, MessageIntent, Validator, ValidatorContext, ValidationResult}; +use crate::{MessageIntent, Network, ValidationResult, Validator, ValidatorContext}; -use std::borrow::Cow; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use std::iter; -use std::time; -use lru::LruCache; use libp2p::PeerId; +use lru::LruCache; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; use sc_network::ObservedRole; +use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + iter, + sync::Arc, + time, +}; use wasm_timer::Instant; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 @@ -87,17 +89,13 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { /// Broadcast a message to all peers that have not received it previously. fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast( - self.network, - topic, - message, - force, - ); + self.gossip.multicast(self.network, topic, message, force); } /// Send addressed message to a peer. 
fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(who.clone(), self.gossip.protocol.clone(), message); + self.network + .write_notification(who.clone(), self.gossip.protocol.clone(), message); } /// Send all messages with given topic to a peer. @@ -114,8 +112,9 @@ fn propagate<'a, B: BlockT, I>( peers: &mut HashMap>, validator: &Arc>, ) - // (msg_hash, topic, message) - where I: Clone + IntoIterator)>, +// (msg_hash, topic, message) +where + I: Clone + IntoIterator)>, { let mut message_allowed = validator.message_allowed(); @@ -124,7 +123,7 @@ fn propagate<'a, B: BlockT, I>( let intent = match intent { MessageIntent::Broadcast { .. } => if peer.known_messages.contains(&message_hash) { - continue; + continue } else { MessageIntent::Broadcast }, @@ -140,7 +139,7 @@ fn propagate<'a, B: BlockT, I>( }; if !message_allowed(id, intent, &topic, &message) { - continue; + continue } peer.known_messages.insert(message_hash.clone()); @@ -180,7 +179,7 @@ impl ConsensusGossip { Some(Err(e)) => { tracing::debug!(target: "gossip", "Failed to register metrics: {:?}", e); None - } + }, None => None, }; @@ -204,9 +203,7 @@ impl ConsensusGossip { ?role, "Registering peer", ); - self.peers.insert(who.clone(), PeerConsensus { - known_messages: HashSet::new(), - }); + self.peers.insert(who.clone(), PeerConsensus { known_messages: HashSet::new() }); let validator = self.validator.clone(); let mut context = NetworkContext { gossip: self, network }; @@ -221,12 +218,7 @@ impl ConsensusGossip { sender: Option, ) { if self.known_messages.put(message_hash.clone(), ()).is_none() { - self.messages.push(MessageEntry { - message_hash, - topic, - message, - sender, - }); + self.messages.push(MessageEntry { message_hash, topic, message, sender }); if let Some(ref metrics) = self.metrics { metrics.registered_messages.inc(); @@ -239,11 +231,7 @@ impl ConsensusGossip { /// the message's topic. No validation is performed on the message, if the /// message is already expired it should be dropped on the next garbage /// collection. - pub fn register_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { + pub fn register_message(&mut self, topic: B::Hash, message: Vec) { let message_hash = HashFor::::hash(&message[..]); self.register_message_hashed(message_hash, topic, message, None); } @@ -267,7 +255,9 @@ impl ConsensusGossip { /// Rebroadcast all messages to all peers. fn rebroadcast(&mut self, network: &mut dyn Network) { - let messages = self.messages.iter() + let messages = self + .messages + .iter() .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); propagate( network, @@ -275,20 +265,28 @@ impl ConsensusGossip { messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, - &self.validator + &self.validator, ); } /// Broadcast all messages with given topic. 
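`register_message_hashed` above leans on an `LruCache` idiom worth calling out: `put` returns the previous value for the key, so `is_none()` doubles as a "first time seen" test while also refreshing the entry's recency. In isolation:

use lru::LruCache;

fn register_once(seen: &mut LruCache<u64, ()>, message_hash: u64) -> bool {
	// `put` hands back the old value for this key; `None` means the hash
	// was not yet known, so the caller should store the full message.
	seen.put(message_hash, ()).is_none()
}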
pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { - let messages = self.messages.iter() - .filter_map(|entry| - if entry.topic == topic { - Some((&entry.message_hash, &entry.topic, &entry.message)) - } else { None } - ); + let messages = self.messages.iter().filter_map(|entry| { + if entry.topic == topic { + Some((&entry.message_hash, &entry.topic, &entry.message)) + } else { + None + } + }); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.protocol.clone(), messages, intent, &mut self.peers, &self.validator); + propagate( + network, + self.protocol.clone(), + messages, + intent, + &mut self.peers, + &self.validator, + ); } /// Prune old or no longer relevant consensus messages. Provide a predicate @@ -298,8 +296,7 @@ impl ConsensusGossip { let before = self.messages.len(); let mut message_expired = self.validator.message_expired(); - self.messages - .retain(|entry| !message_expired(entry.topic, &entry.message)); + self.messages.retain(|entry| !message_expired(entry.topic, &entry.message)); let expired_messages = before - self.messages.len(); @@ -323,10 +320,13 @@ impl ConsensusGossip { /// Get valid messages received in the past for a topic (might have expired meanwhile). pub fn messages_for(&mut self, topic: B::Hash) -> impl Iterator + '_ { - self.messages.iter().filter(move |e| e.topic == topic).map(|entry| TopicNotification { - message: entry.message.clone(), - sender: entry.sender.clone(), - }) + self.messages + .iter() + .filter(move |e| e.topic == topic) + .map(|entry| TopicNotification { + message: entry.message.clone(), + sender: entry.sender.clone(), + }) } /// Register incoming messages and return the ones that are new and valid (according to a gossip @@ -360,7 +360,7 @@ impl ConsensusGossip { "Ignored already known message", ); network.report_peer(who.clone(), rep::DUPLICATE_GOSSIP); - continue; + continue } // validate the message @@ -380,7 +380,7 @@ impl ConsensusGossip { protocol = %self.protocol, "Discard message from peer", ); - continue; + continue }, }; @@ -393,24 +393,19 @@ impl ConsensusGossip { protocol = %self.protocol, "Got message from unregistered peer", ); - continue; - } + continue + }, }; network.report_peer(who.clone(), rep::GOSSIP_SUCCESS); peer.known_messages.insert(message_hash); - to_forward.push((topic, TopicNotification { - message: message.clone(), - sender: Some(who.clone()) - })); + to_forward.push(( + topic, + TopicNotification { message: message.clone(), sender: Some(who.clone()) }, + )); if keep { - self.register_message_hashed( - message_hash, - topic, - message, - Some(who.clone()), - ); + self.register_message_hashed(message_hash, topic, message, Some(who.clone())); } } @@ -423,24 +418,21 @@ impl ConsensusGossip { network: &mut dyn Network, who: &PeerId, topic: B::Hash, - force: bool + force: bool, ) { let mut message_allowed = self.validator.message_allowed(); if let Some(ref mut peer) = self.peers.get_mut(who) { for entry in self.messages.iter().filter(|m| m.topic == topic) { - let intent = if force { - MessageIntent::ForcedBroadcast - } else { - MessageIntent::Broadcast - }; + let intent = + if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; if !force && peer.known_messages.contains(&entry.message_hash) { - continue; + continue } if !message_allowed(who, intent, &entry.topic, &entry.message) { - continue; + continue } peer.known_messages.insert(entry.message_hash.clone()); @@ -452,7 +444,11 @@ impl 
ConsensusGossip { ?entry.message, "Sending topic message", ); - network.write_notification(who.clone(), self.protocol.clone(), entry.message.clone()); + network.write_notification( + who.clone(), + self.protocol.clone(), + entry.message.clone(), + ); } } } @@ -474,18 +470,13 @@ impl ConsensusGossip { iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, - &self.validator + &self.validator, ); } /// Send addressed message to a peer. The message is not kept or multicast /// later on. - pub fn send_message( - &mut self, - network: &mut dyn Network, - who: &PeerId, - message: Vec, - ) { + pub fn send_message(&mut self, network: &mut dyn Network, who: &PeerId, message: Vec) { let peer = match self.peers.get_mut(who) { None => return, Some(peer) => peer, @@ -534,11 +525,15 @@ impl Metrics { #[cfg(test)] mod tests { + use super::*; use futures::prelude::*; use sc_network::{Event, ReputationChange}; - use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; - use std::{borrow::Cow, pin::Pin, sync::{Arc, Mutex}}; - use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use std::{ + borrow::Cow, + pin::Pin, + sync::{Arc, Mutex}, + }; type Block = RawBlock>; @@ -552,7 +547,7 @@ mod tests { sender: None, }); } - } + }; } struct AllowAll; @@ -568,7 +563,7 @@ mod tests { } struct DiscardAll; - impl Validator for DiscardAll{ + impl Validator for DiscardAll { fn validate( &self, _context: &mut dyn ValidatorContext, @@ -602,11 +597,9 @@ mod tests { unimplemented!(); } - fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { - } + fn add_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} - fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) { - } + fn remove_set_reserved(&self, _: PeerId, _: Cow<'static, str>) {} fn write_notification(&self, _: PeerId, _: Cow<'static, str>, _: Vec) { unimplemented!(); @@ -677,7 +670,7 @@ mod tests { assert_eq!( consensus.messages_for(topic).next(), - Some(TopicNotification { message: message, sender: None }), + Some(TopicNotification { message, sender: None }), ); } @@ -712,15 +705,12 @@ mod tests { #[test] fn on_incoming_ignores_discarded_messages() { let to_forward = ConsensusGossip::::new(Arc::new(DiscardAll), "/foo".into(), None) - .on_incoming( - &mut NoOpNetwork::default(), - PeerId::random(), - vec![vec![1, 2, 3]], - ); + .on_incoming(&mut NoOpNetwork::default(), PeerId::random(), vec![vec![1, 2, 3]]); assert!( to_forward.is_empty(), - "Expected `on_incoming` to ignore discarded message but got {:?}", to_forward, + "Expected `on_incoming` to ignore discarded message but got {:?}", + to_forward, ); } diff --git a/substrate/client/network-gossip/src/validator.rs b/substrate/client/network-gossip/src/validator.rs index 4b5440c1a06f3c7d7f6fa38b84a64a87e3543629..9a2652d03f6429afc91528455f420b0620ebe502 100644 --- a/substrate/client/network-gossip/src/validator.rs +++ b/substrate/client/network-gossip/src/validator.rs @@ -26,15 +26,14 @@ pub trait Validator: Send + Sync { } /// New connection is dropped. - fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) { - } + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) {} /// Validate consensus message. fn validate( &self, context: &mut dyn ValidatorContext, sender: &PeerId, - data: &[u8] + data: &[u8], ) -> ValidationResult; /// Produce a closure for validating messages on a given topic. 
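Both `message_expired` here and `message_allowed` below return boxed closures rather than taking `&self` per message: the validator snapshots whatever state it needs once per pass, and the state machine then queries the closure once per message. A standalone illustration of that shape, with hypothetical names:

use std::collections::HashSet;

struct Gate {
	allowed_topics: HashSet<u64>,
}

impl Gate {
	// Built once per propagation pass; called once per (peer, message).
	fn message_allowed<'a>(&'a self) -> Box<dyn FnMut(u64) -> bool + 'a> {
		Box::new(move |topic| self.allowed_topics.contains(&topic))
	}
}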
@@ -43,7 +42,9 @@ pub trait Validator: Send + Sync { } /// Produce a closure for filtering egress messages. - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { Box::new(move |_who, _intent, _topic, _data| true) } } @@ -99,7 +100,9 @@ impl Validator for DiscardAll { Box::new(move |_topic, _data| true) } - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { Box::new(move |_who, _intent, _topic, _data| false) } } diff --git a/substrate/client/network/build.rs b/substrate/client/network/build.rs index 0eea622e8757430be6a81def823454261c9decf7..6e5b83d4e58aea90d284db2af17519bd6eeda520 100644 --- a/substrate/client/network/build.rs +++ b/substrate/client/network/build.rs @@ -1,8 +1,5 @@ -const PROTOS: &[&str] = &[ - "src/schema/api.v1.proto", - "src/schema/light.v1.proto", - "src/schema/bitswap.v1.2.0.proto", -]; +const PROTOS: &[&str] = + &["src/schema/api.v1.proto", "src/schema/light.v1.proto", "src/schema/bitswap.v1.2.0.proto"]; fn main() { prost_build::compile_protos(PROTOS, &["src/schema"]).unwrap(); diff --git a/substrate/client/network/src/behaviour.rs b/substrate/client/network/src/behaviour.rs index 576c49d1da366867f237d25348c49594e7fef162..37dfc0cf99c24d8395542c0b95be843d872309d1 100644 --- a/substrate/client/network/src/behaviour.rs +++ b/substrate/client/network/src/behaviour.rs @@ -17,27 +17,33 @@ // along with this program. If not, see . use crate::{ - config::ProtocolId, bitswap::Bitswap, + config::ProtocolId, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + light_client_requests, peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, - peer_info, request_responses, light_client_requests, - ObservedRole, DhtEvent, + request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; use futures::{channel::oneshot, stream::StreamExt}; -use libp2p::NetworkBehaviour; -use libp2p::core::{Multiaddr, PeerId, PublicKey}; -use libp2p::identify::IdentifyInfo; -use libp2p::kad::record; -use libp2p::swarm::{ - NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters, toggle::Toggle +use libp2p::{ + core::{Multiaddr, PeerId, PublicKey}, + identify::IdentifyInfo, + kad::record, + swarm::{toggle::Toggle, NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}, + NetworkBehaviour, }; use log::debug; use prost::Message; -use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justifications}; +use sp_consensus::{ + import_queue::{IncomingBlock, Origin}, + BlockOrigin, +}; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + Justifications, +}; use std::{ borrow::Cow, collections::{HashSet, VecDeque}, @@ -47,8 +53,7 @@ use std::{ }; pub use crate::request_responses::{ - ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, - IfDisconnected + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, RequestId, ResponseFailure, }; /// General behaviour of the network. Combines all protocols together. 
@@ -210,8 +215,9 @@ impl Behaviour { peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), discovery: disco_config.finish(), bitswap: bitswap.into(), - request_responses: - request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, + request_responses: request_responses::RequestResponsesBehaviour::new( + request_response_protocols.into_iter(), + )?, light_client_request_sender, events: VecDeque::new(), block_request_protocol_name, @@ -233,7 +239,9 @@ impl Behaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. - pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { self.discovery.num_entries_per_kbucket() } @@ -243,7 +251,9 @@ impl Behaviour { } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { self.discovery.kademlia_records_total_size() } @@ -265,7 +275,8 @@ impl Behaviour { pending_response: oneshot::Sender, RequestFailure>>, connect: IfDisconnected, ) { - self.request_responses.send_request(target, protocol, request, pending_response, connect) + self.request_responses + .send_request(target, protocol, request, pending_response, connect) } /// Returns a shared reference to the user protocol. @@ -307,21 +318,20 @@ fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { } } -impl NetworkBehaviourEventProcess for -Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: void::Void) { void::unreachable(event) } } -impl NetworkBehaviourEventProcess> for -Behaviour { +impl NetworkBehaviourEventProcess> for Behaviour { fn inject_event(&mut self, event: CustomMessageOutcome) { match event { CustomMessageOutcome::BlockImport(origin, blocks) => self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - self.events.push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => self + .events + .push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), CustomMessageOutcome::BlockRequest { target, request, pending_response } => { let mut buf = Vec::with_capacity(request.encoded_len()); if let Err(err) = request.encode(&mut buf) { @@ -334,7 +344,11 @@ Behaviour { } self.request_responses.send_request( - &target, &self.block_request_protocol_name, buf, pending_response, IfDisconnected::ImmediateError, + &target, + &self.block_request_protocol_name, + buf, + pending_response, + IfDisconnected::ImmediateError, ); }, CustomMessageOutcome::StateRequest { target, request, pending_response } => { @@ -349,11 +363,19 @@ Behaviour { } self.request_responses.send_request( - &target, &self.state_request_protocol_name, buf, pending_response, IfDisconnected::ImmediateError, + &target, + &self.state_request_protocol_name, + buf, + pending_response, + IfDisconnected::ImmediateError, ); }, CustomMessageOutcome::NotificationStreamOpened { - remote, protocol, negotiated_fallback, roles, notifications_sink + remote, + protocol, + negotiated_fallback, + roles, + notifications_sink, } => { 
self.events.push_back(BehaviourOut::NotificationStreamOpened { remote, @@ -363,32 +385,33 @@ Behaviour { notifications_sink: notifications_sink.clone(), }); }, - CustomMessageOutcome::NotificationStreamReplaced { remote, protocol, notifications_sink } => - self.events.push_back(BehaviourOut::NotificationStreamReplaced { - remote, - protocol, - notifications_sink, - }), - CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => - self.events.push_back(BehaviourOut::NotificationStreamClosed { - remote, - protocol, - }), + CustomMessageOutcome::NotificationStreamReplaced { + remote, + protocol, + notifications_sink, + } => self.events.push_back(BehaviourOut::NotificationStreamReplaced { + remote, + protocol, + notifications_sink, + }), + CustomMessageOutcome::NotificationStreamClosed { remote, protocol } => self + .events + .push_back(BehaviourOut::NotificationStreamClosed { remote, protocol }), CustomMessageOutcome::NotificationsReceived { remote, messages } => { self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, CustomMessageOutcome::PeerNewBest(peer_id, number) => { self.light_client_request_sender.update_best_block(&peer_id, number); - } + }, CustomMessageOutcome::SyncConnected(peer_id) => { self.light_client_request_sender.inject_connected(peer_id); self.events.push_back(BehaviourOut::SyncConnected(peer_id)) - } + }, CustomMessageOutcome::SyncDisconnected(peer_id) => { self.light_client_request_sender.inject_disconnected(peer_id); self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) - } - CustomMessageOutcome::None => {} + }, + CustomMessageOutcome::None => {}, } } } @@ -397,38 +420,29 @@ impl NetworkBehaviourEventProcess for Behav fn inject_event(&mut self, event: request_responses::Event) { match event { request_responses::Event::InboundRequest { peer, protocol, result } => { - self.events.push_back(BehaviourOut::InboundRequest { + self.events.push_back(BehaviourOut::InboundRequest { peer, protocol, result }); + }, + request_responses::Event::RequestFinished { peer, protocol, duration, result } => { + self.events.push_back(BehaviourOut::RequestFinished { peer, protocol, + duration, result, }); - } - request_responses::Event::RequestFinished { peer, protocol, duration, result } => { - self.events.push_back(BehaviourOut::RequestFinished { - peer, protocol, duration, result, - }); }, - request_responses::Event::ReputationChanges { peer, changes } => { + request_responses::Event::ReputationChanges { peer, changes } => for change in changes { self.substrate.report_peer(peer, change); - } - } + }, } } } -impl NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { let peer_info::PeerInfoEvent::Identified { peer_id, - info: IdentifyInfo { - protocol_version, - agent_version, - mut listen_addrs, - protocols, - .. - }, + info: IdentifyInfo { protocol_version, agent_version, mut listen_addrs, protocols, .. }, } = event; if listen_addrs.len() > 30 { @@ -447,8 +461,7 @@ impl NetworkBehaviourEventProcess } } -impl NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, out: DiscoveryOut) { match out { DiscoveryOut::UnroutablePeer(_peer_id) => { @@ -456,27 +469,28 @@ impl NetworkBehaviourEventProcess // to Kademlia is handled by the `Identify` protocol, part of the // `PeerInfoBehaviour`. See the `NetworkBehaviourEventProcess` // implementation for `PeerInfoEvent`. 
- } + }, DiscoveryOut::Discovered(peer_id) => { self.substrate.add_default_set_discovered_nodes(iter::once(peer_id)); - } + }, DiscoveryOut::ValueFound(results, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); - } + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); + }, DiscoveryOut::ValueNotFound(key, duration) => { self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); - } + }, DiscoveryOut::ValuePut(key, duration) => { self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); - } + }, DiscoveryOut::ValuePutFailed(key, duration) => { - self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); - } - DiscoveryOut::RandomKademliaStarted(protocols) => { + self.events + .push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); + }, + DiscoveryOut::RandomKademliaStarted(protocols) => for protocol in protocols { self.events.push_back(BehaviourOut::RandomKademliaStarted(protocol)); - } - } + }, } } } @@ -488,22 +502,16 @@ impl Behaviour { _: &mut impl PollParameters, ) -> Poll>> { use light_client_requests::sender::OutEvent; - while let Poll::Ready(Some(event)) = - self.light_client_request_sender.poll_next_unpin(cx) - { + while let Poll::Ready(Some(event)) = self.light_client_request_sender.poll_next_unpin(cx) { match event { - OutEvent::SendRequest { - target, - request, - pending_response, - protocol_name, - } => self.request_responses.send_request( - &target, - &protocol_name, - request, - pending_response, - IfDisconnected::ImmediateError, - ), + OutEvent::SendRequest { target, request, pending_response, protocol_name } => + self.request_responses.send_request( + &target, + &protocol_name, + request, + pending_response, + IfDisconnected::ImmediateError, + ), } } diff --git a/substrate/client/network/src/bitswap.rs b/substrate/client/network/src/bitswap.rs index aea2b8420cb2ca1be2b772e00fb641de0cc8256b..3a10367c64a4b9c773e8c929a7a23ce2ce011a3b 100644 --- a/substrate/client/network/src/bitswap.rs +++ b/substrate/client/network/src/bitswap.rs @@ -20,31 +20,39 @@ //! Only supports bitswap 1.2.0. //! CID is expected to reference 256-bit Blake2b transaction hash. 
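Given the constraint stated above — CIDv1 over a 256-bit Blake2b multihash — here is a sketch of constructing a CID this handler would accept, using the same `cid` crate imported below. The `0x55` ("raw") multicodec is chosen purely for illustration:

use cid::{multihash::{Code, MultihashDigest}, Cid};

fn transaction_cid(tx_hash: &[u8; 32]) -> Cid {
	// Blake2b-256 multihash over the 32-byte transaction hash, matching the
	// version/code/size checks performed in inject_event below.
	let multihash = Code::Blake2b256.digest(tx_hash);
	Cid::new_v1(0x55, multihash)
}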
-use std::collections::VecDeque; -use std::io; -use std::sync::Arc; -use std::task::{Context, Poll}; +use crate::{ + chain::Client, + schema::bitswap::{ + message::{wantlist::WantType, Block as MessageBlock, BlockPresence, BlockPresenceType}, + Message as BitswapMessage, + }, +}; use cid::Version; use core::pin::Pin; -use futures::Future; -use futures::io::{AsyncRead, AsyncWrite}; -use libp2p::core::{ - connection::ConnectionId, Multiaddr, PeerId, - upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo, +use futures::{ + io::{AsyncRead, AsyncWrite}, + Future, }; -use libp2p::swarm::{ - NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, - ProtocolsHandler, IntoProtocolsHandler, OneShotHandler, +use libp2p::{ + core::{ + connection::ConnectionId, upgrade, InboundUpgrade, Multiaddr, OutboundUpgrade, PeerId, + UpgradeInfo, + }, + swarm::{ + IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, + OneShotHandler, PollParameters, ProtocolsHandler, + }, }; -use log::{error, debug, trace}; +use log::{debug, error, trace}; use prost::Message; -use sp_runtime::traits::{Block as BlockT}; -use unsigned_varint::{encode as varint_encode}; -use crate::chain::Client; -use crate::schema::bitswap::{ - Message as BitswapMessage, - message::{wantlist::WantType, Block as MessageBlock, BlockPresenceType, BlockPresence}, +use sp_runtime::traits::Block as BlockT; +use std::{ + collections::VecDeque, + io, + sync::Arc, + task::{Context, Poll}, }; +use unsigned_varint::encode as varint_encode; const LOG_TARGET: &str = "bitswap"; @@ -182,10 +190,7 @@ pub struct Bitswap { impl Bitswap { /// Create a new instance of the bitswap protocol handler. pub fn new(client: Arc>) -> Self { - Bitswap { - client, - ready_blocks: Default::default(), - } + Bitswap { client, ready_blocks: Default::default() } } } @@ -201,11 +206,9 @@ impl NetworkBehaviour for Bitswap { Vec::new() } - fn inject_connected(&mut self, _peer: &PeerId) { - } + fn inject_connected(&mut self, _peer: &PeerId) {} - fn inject_disconnected(&mut self, _peer: &PeerId) { - } + fn inject_disconnected(&mut self, _peer: &PeerId) {} fn inject_event(&mut self, peer: PeerId, _connection: ConnectionId, message: HandlerEvent) { let request = match message { @@ -215,7 +218,7 @@ impl NetworkBehaviour for Bitswap { trace!(target: LOG_TARGET, "Received request: {:?} from {}", request, peer); if self.ready_blocks.len() > MAX_RESPONSE_QUEUE { debug!(target: LOG_TARGET, "Ignored request: queue is full"); - return; + return } let mut response = BitswapMessage { wantlist: None, @@ -227,29 +230,25 @@ impl NetworkBehaviour for Bitswap { let wantlist = match request.wantlist { Some(wantlist) => wantlist, None => { - debug!( - target: LOG_TARGET, - "Unexpected bitswap message from {}", - peer, - ); - return; - } + debug!(target: LOG_TARGET, "Unexpected bitswap message from {}", peer,); + return + }, }; if wantlist.entries.len() > MAX_WANTED_BLOCKS { trace!(target: LOG_TARGET, "Ignored request: too many entries"); - return; + return } for entry in wantlist.entries { let cid = match cid::Cid::read_bytes(entry.block.as_slice()) { Ok(cid) => cid, Err(e) => { trace!(target: LOG_TARGET, "Bad CID {:?}: {:?}", entry.block, e); - continue; - } + continue + }, }; - if cid.version() != cid::Version::V1 - || cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) - || cid.hash().size() != 32 + if cid.version() != cid::Version::V1 || + cid.hash().code() != u64::from(cid::multihash::Code::Blake2b256) || + cid.hash().size() != 32 { 
debug!(target: LOG_TARGET, "Ignoring unsupported CID {}: {}", peer, cid); continue @@ -261,7 +260,7 @@ impl NetworkBehaviour for Bitswap { Err(e) => { error!(target: LOG_TARGET, "Error retrieving transaction {}: {}", hash, e); None - } + }, }; match transaction { Some(transaction) => { @@ -273,10 +272,9 @@ impl NetworkBehaviour for Bitswap { mh_type: cid.hash().code(), mh_len: cid.hash().size(), }; - response.payload.push(MessageBlock { - prefix: prefix.to_bytes(), - data: transaction, - }); + response + .payload + .push(MessageBlock { prefix: prefix.to_bytes(), data: transaction }); } else { response.block_presences.push(BlockPresence { r#type: BlockPresenceType::Have as i32, @@ -292,7 +290,7 @@ impl NetworkBehaviour for Bitswap { cid: cid.to_bytes(), }); } - } + }, } } trace!(target: LOG_TARGET, "Response: {:?}", response); @@ -304,7 +302,7 @@ impl NetworkBehaviour for Bitswap { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent, >, - > { + >{ if let Some((peer_id, message)) = self.ready_blocks.pop_front() { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), diff --git a/substrate/client/network/src/block_request_handler.rs b/substrate/client/network/src/block_request_handler.rs index ce65e5eca3457c20459ef47523b8225ad32e04bf..66ae0d43bb22f55b599e2197318a438e3c294cdd 100644 --- a/substrate/client/network/src/block_request_handler.rs +++ b/substrate/client/network/src/block_request_handler.rs @@ -17,25 +17,32 @@ //! Helper for handling (i.e. answering) block requests from a remote peer via the //! [`crate::request_responses::RequestResponsesBehaviour`]. -use codec::{Encode, Decode}; -use crate::chain::Client; -use crate::config::ProtocolId; -use crate::protocol::{message::BlockAttributes}; -use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; -use crate::schema::v1::block_request::FromBlock; -use crate::schema::v1::{BlockResponse, Direction}; -use crate::{PeerId, ReputationChange}; -use futures::channel::{mpsc, oneshot}; -use futures::stream::StreamExt; +use crate::{ + chain::Client, + config::ProtocolId, + protocol::message::BlockAttributes, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema::v1::{block_request::FromBlock, BlockResponse, Direction}, + PeerId, ReputationChange, +}; +use codec::{Decode, Encode}; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; use log::debug; use lru::LruCache; use prost::Message; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, One, Zero}; -use std::cmp::min; -use std::sync::Arc; -use std::time::Duration; -use std::hash::{Hasher, Hash}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header, One, Zero}, +}; +use std::{ + cmp::min, + hash::{Hash, Hasher}, + sync::Arc, + time::Duration, +}; const LOG_TARGET: &str = "sync"; const MAX_BLOCKS_IN_RESPONSE: usize = 128; @@ -61,7 +68,6 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } /// Generate the block protocol name from chain specific protocol identifier. -// // Visibility `pub(crate)` to allow `crate::light_client_requests::sender` to generate block request // protocol name and send block requests. 
pub(crate) fn generate_protocol_name(protocol_id: &ProtocolId) -> String { @@ -139,9 +145,7 @@ impl BlockRequestHandler { Ok(()) => debug!(target: LOG_TARGET, "Handled block request from {}.", peer), Err(e) => debug!( target: LOG_TARGET, - "Failed to handle block request from {}: {}", - peer, - e, + "Failed to handle block request from {}: {}", peer, e, ), } } @@ -159,11 +163,11 @@ impl BlockRequestHandler { FromBlock::Hash(ref h) => { let h = Decode::decode(&mut h.as_ref())?; BlockId::::Hash(h) - } + }, FromBlock::Number(ref n) => { let n = Decode::decode(&mut n.as_ref())?; BlockId::::Number(n) - } + }, }; let max_blocks = if request.max_blocks == 0 { @@ -172,8 +176,8 @@ impl BlockRequestHandler { min(request.max_blocks as usize, MAX_BLOCKS_IN_RESPONSE) }; - let direction = Direction::from_i32(request.direction) - .ok_or(HandleRequestError::ParseDirection)?; + let direction = + Direction::from_i32(request.direction).ok_or(HandleRequestError::ParseDirection)?; let attributes = BlockAttributes::from_be_u32(request.fields)?; @@ -201,7 +205,7 @@ impl BlockRequestHandler { }, None => { self.seen_requests.put(key.clone(), SeenRequestsValue::First); - } + }, } debug!( @@ -247,11 +251,13 @@ impl BlockRequestHandler { Err(()) }; - pending_response.send(OutgoingResponse { - result, - reputation_changes: reputation_change.into_iter().collect(), - sent_feedback: None, - }).map_err(|_| HandleRequestError::SendResponse) + pending_response + .send(OutgoingResponse { + result, + reputation_changes: reputation_change.into_iter().collect(), + sent_feedback: None, + }) + .map_err(|_| HandleRequestError::SendResponse) } fn get_block_response( @@ -298,10 +304,8 @@ impl BlockRequestHandler { let justification = justifications.and_then(|just| just.into_justification(*b"FRNK")); - let is_empty_justification = justification - .as_ref() - .map(|j| j.is_empty()) - .unwrap_or(false); + let is_empty_justification = + justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); let justification = justification.unwrap_or_default(); @@ -310,25 +314,27 @@ impl BlockRequestHandler { let body = if get_body { match self.client.block_body(&BlockId::Hash(hash))? { - Some(mut extrinsics) => extrinsics.iter_mut() - .map(|extrinsic| extrinsic.encode()) - .collect(), + Some(mut extrinsics) => + extrinsics.iter_mut().map(|extrinsic| extrinsic.encode()).collect(), None => { log::trace!(target: LOG_TARGET, "Missing data for block request."); - break; - } + break + }, } } else { Vec::new() }; - let indexed_body = if get_indexed_body { + let indexed_body = if get_indexed_body { match self.client.block_indexed_body(&BlockId::Hash(hash))? { Some(transactions) => transactions, None => { - log::trace!(target: LOG_TARGET, "Missing indexed block data for block request."); - break; - } + log::trace!( + target: LOG_TARGET, + "Missing indexed block data for block request." 
+ ); + break + }, } } else { Vec::new() @@ -336,11 +342,7 @@ impl BlockRequestHandler { let block_data = crate::schema::v1::BlockData { hash: hash.encode(), - header: if get_header { - header.encode() - } else { - Vec::new() - }, + header: if get_header { header.encode() } else { Vec::new() }, body, receipt: Vec::new(), message_queue: Vec::new(), @@ -358,15 +360,13 @@ impl BlockRequestHandler { } match direction { - Direction::Ascending => { - block_id = BlockId::Number(number + One::one()) - } + Direction::Ascending => block_id = BlockId::Number(number + One::one()), Direction::Descending => { if number.is_zero() { break } block_id = BlockId::Hash(parent_hash) - } + }, } } diff --git a/substrate/client/network/src/chain.rs b/substrate/client/network/src/chain.rs index 32d4cc9ff024f6cdb8c1d18a0d3e882292026cba..599e9d796c118cd7904d07cc372bb73169628b21 100644 --- a/substrate/client/network/src/chain.rs +++ b/substrate/client/network/src/chain.rs @@ -18,18 +18,30 @@ //! Blockchain access trait -use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; +pub use sc_client_api::{ImportedState, StorageData, StorageKey}; +use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; -pub use sc_client_api::{StorageKey, StorageData, ImportedState}; /// Local client abstraction for the network. -pub trait Client: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} - -impl Client for T - where - T: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} +pub trait Client: + HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ +} + +impl Client for T where + T: HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ +} diff --git a/substrate/client/network/src/config.rs b/substrate/client/network/src/config.rs index 8cc467a7fb9fd29bf397559d00892cf16b64e95c..cddc52352485e1fb4aec57ae78e0f44864ff1944 100644 --- a/substrate/client/network/src/config.rs +++ b/substrate/client/network/src/config.rs @@ -21,14 +21,14 @@ //! The [`Params`] struct is the struct that must be passed in order to initialize the networking. //! See the documentation of [`Params`]. -pub use crate::chain::Client; -pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; -pub use crate::request_responses::{ - IncomingRequest, - OutgoingResponse, - ProtocolConfig as RequestResponseConfig, +pub use crate::{ + chain::Client, + on_demand_layer::{AlwaysBadChecker, OnDemand}, + request_responses::{ + IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, + }, }; -pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; +pub use libp2p::{build_multiaddr, core::PublicKey, identity, wasm_ext::ExtTransport}; // Note: this re-export shouldn't be part of the public API of the crate and will be removed in // the future. 
@@ -46,15 +46,19 @@ use libp2p::{ use prometheus_endpoint::Registry; use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; use sp_runtime::traits::Block as BlockT; -use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; use std::{ + borrow::Cow, collections::HashMap, + convert::TryFrom, error::Error, fs, + future::Future, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, + pin::Pin, str, + str::FromStr, sync::Arc, }; use zeroize::Zeroize; @@ -181,7 +185,7 @@ pub enum TransactionImport { } /// Future resolving to transaction import result. -pub type TransactionImportFuture = Pin + Send>>; +pub type TransactionImportFuture = Pin + Send>>; /// Transaction pool interface pub trait TransactionPool: Send + Sync { @@ -192,10 +196,7 @@ pub trait TransactionPool: Send + Sync { /// Import a transaction into the pool. /// /// This will return future. - fn import( - &self, - transaction: B::Extrinsic, - ) -> TransactionImportFuture; + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture; /// Notify the pool about transactions broadcast. fn on_broadcasted(&self, propagations: HashMap>); /// Get transaction by hash. @@ -219,16 +220,15 @@ impl TransactionPool for EmptyTransaction Default::default() } - fn import( - &self, - _transaction: B::Extrinsic - ) -> TransactionImportFuture { + fn import(&self, _transaction: B::Extrinsic) -> TransactionImportFuture { Box::pin(future::ready(TransactionImport::KnownGood)) } fn on_broadcasted(&self, _: HashMap>) {} - fn transaction(&self, _h: &H) -> Option { None } + fn transaction(&self, _h: &H) -> Option { + None + } } /// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. @@ -267,17 +267,16 @@ impl fmt::Debug for ProtocolId { /// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); /// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); /// ``` -/// pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { let addr: Multiaddr = addr_str.parse()?; parse_addr(addr) } /// Splits a Multiaddress into a Multiaddress and PeerId. 
-pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| ParseErr::InvalidPeerId)?, + Some(multiaddr::Protocol::P2p(key)) => + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)?, _ => return Err(ParseErr::PeerIdMissing), }; @@ -325,10 +324,7 @@ impl FromStr for MultiaddrWithPeerId { fn from_str(s: &str) -> Result { let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(MultiaddrWithPeerId { - peer_id, - multiaddr, - }) + Ok(MultiaddrWithPeerId { peer_id, multiaddr }) } } @@ -504,18 +500,13 @@ impl NetworkConfiguration { /// Create new default configuration for localhost-only connection with random port (useful for testing) pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; + .collect()]; config.allow_non_globals_in_dht = true; config @@ -523,18 +514,13 @@ impl NetworkConfiguration { /// Create new default configuration for localhost-only connection with random port (useful for testing) pub fn new_memory() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; + .collect()]; config.allow_non_globals_in_dht = true; config @@ -674,7 +660,7 @@ impl NonReservedPeerMode { #[derive(Clone, Debug)] pub enum NodeKeyConfig { /// A Ed25519 secret key configuration. - Ed25519(Secret) + Ed25519(Secret), } impl Default for NodeKeyConfig { @@ -698,7 +684,7 @@ pub enum Secret { /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. File(PathBuf), /// Always generate a new secret key `K`. 
- New + New, } impl fmt::Debug for Secret { @@ -725,35 +711,27 @@ impl NodeKeyConfig { pub fn into_keypair(self) -> io::Result { use NodeKeyConfig::*; match self { - Ed25519(Secret::New) => - Ok(Keypair::generate_ed25519()), - - Ed25519(Secret::Input(k)) => - Ok(Keypair::Ed25519(k.into())), - - Ed25519(Secret::File(f)) => - get_secret( - f, - |mut b| { - match String::from_utf8(b.to_vec()) - .ok() - .and_then(|s|{ - if s.len() == 64 { - hex::decode(&s).ok() - } else { - None - }} - ) - { - Some(s) => ed25519::SecretKey::from_bytes(s), - _ => ed25519::SecretKey::from_bytes(&mut b), - } - }, - ed25519::SecretKey::generate, - |b| b.as_ref().to_vec() - ) - .map(ed25519::Keypair::from) - .map(Keypair::Ed25519), + Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + + Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), + + Ed25519(Secret::File(f)) => get_secret( + f, + |mut b| match String::from_utf8(b.to_vec()).ok().and_then(|s| { + if s.len() == 64 { + hex::decode(&s).ok() + } else { + None + } + }) { + Some(s) => ed25519::SecretKey::from_bytes(s), + _ => ed25519::SecretKey::from_bytes(&mut b), + }, + ed25519::SecretKey::generate, + |b| b.as_ref().to_vec(), + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), } } } @@ -770,9 +748,9 @@ where W: Fn(&K) -> Vec, { std::fs::read(&file) - .and_then(|mut sk_bytes| - parse(&mut sk_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))) + .and_then(|mut sk_bytes| { + parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + }) .or_else(|e| { if e.kind() == io::ErrorKind::NotFound { file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; @@ -790,7 +768,7 @@ where /// Write secret bytes to a file. fn write_secret_file
<P>
(path: P, sk_bytes: &[u8]) -> io::Result<()> where - P: AsRef<Path> + P: AsRef<Path>, { let mut file = open_secret_file(&path)?; file.write_all(sk_bytes) @@ -800,26 +778,19 @@ where #[cfg(unix)] fn open_secret_file
<P>
(path: P) -> io::Result<fs::File> where - P: AsRef<Path> + P: AsRef<Path>, { use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new() - .write(true) - .create_new(true) - .mode(0o600) - .open(path) + fs::OpenOptions::new().write(true).create_new(true).mode(0o600).open(path) } /// Opens a file containing a secret key in write mode. #[cfg(not(unix))] fn open_secret_file
<P>
(path: P) -> Result<fs::File, io::Error> where - P: AsRef<Path> + P: AsRef<Path>, { - fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(path) + fs::OpenOptions::new().write(true).create_new(true).open(path) } #[cfg(test)] @@ -835,7 +806,7 @@ mod tests { match kp { Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), - _ => panic!("Unexpected keypair.") + _ => panic!("Unexpected keypair."), } } diff --git a/substrate/client/network/src/discovery.rs b/substrate/client/network/src/discovery.rs index 0f2a501bcdeff284e592a0d35f8d9e62d7b3e9b0..da50ded077d5ef19cac13d06cb363b6531a1a31b 100644 --- a/substrate/client/network/src/discovery.rs +++ b/substrate/client/network/src/discovery.rs @@ -45,28 +45,43 @@ //! **Important**: In order for the discovery mechanism to work properly, there needs to be an //! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn //! of a node's address, you must call `add_self_reported_address`. -//! -use crate::config::ProtocolId; -use crate::utils::LruHashSet; +use crate::{config::ProtocolId, utils::LruHashSet}; use futures::prelude::*; use futures_timer::Delay; use ip_network::IpNetwork; -use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler, IntoProtocolsHandler}; -use libp2p::swarm::protocols_handler::multi::IntoMultiHandler; -use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; -use libp2p::kad::GetClosestPeersError; -use libp2p::kad::handler::KademliaHandlerProto; -use libp2p::kad::QueryId; -use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; #[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsConfig, MdnsEvent}; -use libp2p::multiaddr::Protocol; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, Multiaddr, PeerId, PublicKey, + }, + kad::{ + handler::KademliaHandlerProto, + record::{ + self, + store::{MemoryStore, RecordStore}, + }, + GetClosestPeersError, Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, + QueryId, QueryResult, Quorum, Record, + }, + multiaddr::Protocol, + swarm::{ + protocols_handler::multi::IntoMultiHandler, IntoProtocolsHandler, NetworkBehaviour, + NetworkBehaviourAction, PollParameters, ProtocolsHandler, + }, +}; use log::{debug, info, trace, warn}; -use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, num::NonZeroUsize, time::Duration}; -use std::task::{Context, Poll}; use sp_core::hexdisplay::HexDisplay; +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + io, + num::NonZeroUsize, + task::{Context, Poll}, + time::Duration, +}; /// Maximum number of known external addresses that we will cache. /// This only affects whether we will log whenever we (re-)discover @@ -101,7 +116,7 @@ impl DiscoveryConfig { discovery_only_if_under_num: std::u64::MAX, enable_mdns: false, kademlia_disjoint_query_paths: false, - protocol_ids: HashSet::new() + protocol_ids: HashSet::new(), } } @@ -114,7 +129,7 @@ impl DiscoveryConfig { /// Set custom nodes which never expire, e.g. bootstrap or reserved nodes.
pub fn with_user_defined(&mut self, user_defined: I) -> &mut Self where - I: IntoIterator + I: IntoIterator, { self.user_defined.extend(user_defined); self @@ -152,7 +167,7 @@ impl DiscoveryConfig { pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { if self.protocol_ids.contains(&id) { warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); - return self; + return self } self.protocol_ids.insert(id); @@ -181,7 +196,8 @@ impl DiscoveryConfig { protocol_ids, } = self; - let kademlias = protocol_ids.into_iter() + let kademlias = protocol_ids + .into_iter() .map(|protocol_id| { let proto_name = protocol_name_from_protocol_id(&protocol_id); @@ -227,7 +243,7 @@ impl DiscoveryConfig { allow_non_globals_in_dht, known_external_addresses: LruHashSet::new( NonZeroUsize::new(MAX_KNOWN_EXTERNAL_ADDRESSES) - .expect("value is a constant; constant is non-zero; qed.") + .expect("value is a constant; constant is non-zero; qed."), ), } } @@ -305,7 +321,7 @@ impl DiscoveryBehaviour { &mut self, peer_id: &PeerId, supported_protocols: impl Iterator>, - addr: Multiaddr + addr: Multiaddr, ) { if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { log::trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); @@ -353,7 +369,8 @@ impl DiscoveryBehaviour { for k in self.kademlias.values_mut() { if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - self.pending_events.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); + self.pending_events + .push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); } } } @@ -362,14 +379,16 @@ impl DiscoveryBehaviour { /// /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm /// of their lower bound. - pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { - self.kademlias.iter_mut() - .map(|(id, kad)| { - let buckets = kad.kbuckets() - .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) - .collect(); - (id, buckets) - }) + pub fn num_entries_per_kbucket( + &mut self, + ) -> impl ExactSizeIterator)> { + self.kademlias.iter_mut().map(|(id, kad)| { + let buckets = kad + .kbuckets() + .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) + .collect(); + (id, buckets) + }) } /// Returns the number of records in the Kademlia record stores. @@ -382,7 +401,9 @@ impl DiscoveryBehaviour { } /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { // Note that this code is ok only because we use a `MemoryStore`. If the records were // for example stored on disk, this would load every single one of them every single time. self.kademlias.iter_mut().map(|(id, kad)| { @@ -394,7 +415,6 @@ impl DiscoveryBehaviour { /// Can the given `Multiaddr` be put into the DHT? /// /// This test is successful only for global IP addresses and DNS names. - // // NB: Currently all DNS names are allowed and no check for TLD suffixes is done // because the set of valid domains is highly dynamic and would require frequent // updates, for example by utilising publicsuffix.org or IANA. 
@@ -402,9 +422,9 @@ impl DiscoveryBehaviour { let ip = match addr.iter().next() { Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), - Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) - => return true, - _ => return false + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) => + return true, + _ => return false, }; ip.is_global() } @@ -459,19 +479,24 @@ impl NetworkBehaviour for DiscoveryBehaviour { type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - let iter = self.kademlias.iter_mut() + let iter = self + .kademlias + .iter_mut() .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - IntoMultiHandler::try_from_iter(iter) - .expect("There can be at most one handler per `ProtocolId` and \ + IntoMultiHandler::try_from_iter(iter).expect( + "There can be at most one handler per `ProtocolId` and \ protocol names contain the `ProtocolId` so no two protocol \ names in `self.kademlias` can be equal which is the only error \ `try_from_iter` can return, therefore this call is guaranteed \ - to succeed; qed") + to succeed; qed", + ) } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.user_defined.iter() + let mut list = self + .user_defined + .iter() .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) .collect::>(); @@ -488,7 +513,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { list_to_filter.retain(|addr| { if let Some(Protocol::Ip4(addr)) = addr.iter().next() { if addr.is_private() { - return false; + return false } } @@ -504,7 +529,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { list } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.num_connections += 1; for k in self.kademlias.values_mut() { NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) @@ -517,7 +547,12 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.num_connections -= 1; for k in self.kademlias.values_mut() { NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) @@ -534,7 +569,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { for k in self.kademlias.values_mut() { NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) @@ -556,8 +591,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - let new_addr = addr.clone() - .with(Protocol::P2p(self.local_peer_id.clone().into())); + let new_addr = addr.clone().with(Protocol::P2p(self.local_peer_id.clone().into())); // NOTE: we might re-discover the same address multiple times // in which case we just want to refrain from logging. @@ -627,10 +661,10 @@ impl NetworkBehaviour for DiscoveryBehaviour { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent, >, - > { + >{ // Immediately process the content of `discovered`. 
if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } // Poll the stream that fires when we need to start a random Kademlia query. @@ -657,12 +691,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { // Schedule the next random query with exponentially increasing delay, // capped at 60 seconds. *next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, - Duration::from_secs(60)); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + let ev = DiscoveryOut::RandomKademliaStarted( + self.kademlias.keys().cloned().collect(), + ); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) } } } @@ -674,86 +710,112 @@ impl NetworkBehaviour for DiscoveryBehaviour { NetworkBehaviourAction::GenerateEvent(ev) => match ev { KademliaEvent::RoutingUpdated { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::UnroutablePeer { peer, .. } => { let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::RoutablePeer { peer, .. } => { let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, KademliaEvent::PendingRoutablePeer { .. } => { // We are not interested in this event at the moment. - } - KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(res), .. } => { - match res { - Err(GetClosestPeersError::Timeout { key, peers }) => { - debug!(target: "sub-libp2p", + }, + KademliaEvent::QueryResult { + result: QueryResult::GetClosestPeers(res), + .. + } => match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { + debug!(target: "sub-libp2p", "Libp2p => Query for {:?} timed out with {} results", HexDisplay::from(&key), peers.len()); - }, - Ok(ok) => { - trace!(target: "sub-libp2p", + }, + Ok(ok) => { + trace!(target: "sub-libp2p", "Libp2p => Query for {:?} yielded {:?} results", HexDisplay::from(&ok.key), ok.peers.len()); - if ok.peers.is_empty() && self.num_connections != 0 { - debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ + if ok.peers.is_empty() && self.num_connections != 0 { + debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ results"); - } } - } - } - KademliaEvent::QueryResult { result: QueryResult::GetRecord(res), stats, .. } => { + }, + }, + KademliaEvent::QueryResult { + result: QueryResult::GetRecord(res), + stats, + .. + } => { let ev = match res { Ok(ok) => { - let results = ok.records + let results = ok + .records .into_iter() .map(|r| (r.record.key, r.record.value)) .collect(); - DiscoveryOut::ValueFound(results, stats.duration().unwrap_or_else(Default::default)) - } + DiscoveryOut::ValueFound( + results, + stats.duration().unwrap_or_else(Default::default), + ) + }, Err(e @ libp2p::kad::GetRecordError::NotFound { .. 
}) => { trace!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, Err(e) => { debug!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + DiscoveryOut::ValueNotFound( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::QueryResult { result: QueryResult::PutRecord(res), stats, .. } => { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::QueryResult { + result: QueryResult::PutRecord(res), + stats, + .. + } => { let ev = match res { - Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), + Ok(ok) => DiscoveryOut::ValuePut( + ok.key, + stats.duration().unwrap_or_else(Default::default), + ), Err(e) => { debug!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) - } + DiscoveryOut::ValuePutFailed( + e.into_key(), + stats.duration().unwrap_or_else(Default::default), + ) + }, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::QueryResult { result: QueryResult::RepublishRecord(res), .. } => { - match res { - Ok(ok) => debug!(target: "sub-libp2p", + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + }, + KademliaEvent::QueryResult { + result: QueryResult::RepublishRecord(res), + .. + } => match res { + Ok(ok) => debug!(target: "sub-libp2p", "Libp2p => Record republished: {:?}", ok.key), - Err(e) => debug!(target: "sub-libp2p", + Err(e) => debug!(target: "sub-libp2p", "Libp2p => Republishing of record {:?} failed with: {:?}", - e.key(), e) - } - } + e.key(), e), + }, // We never start any other type of query. 
e => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) - } - } + }, + }, NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), NetworkBehaviourAction::DialPeer { peer_id, condition } => @@ -762,10 +824,13 @@ impl NetworkBehaviour for DiscoveryBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: (pid.clone(), event) + event: (pid.clone(), event), }), NetworkBehaviourAction::ReportObservedAddr { address, score } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), } } } @@ -774,29 +839,30 @@ impl NetworkBehaviour for DiscoveryBehaviour { #[cfg(not(target_os = "unknown"))] while let Poll::Ready(ev) = self.mdns.poll(cx, params) { match ev { - NetworkBehaviourAction::GenerateEvent(event) => { - match event { - MdnsEvent::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue; - } + NetworkBehaviourAction::GenerateEvent(event) => match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue + } - self.pending_events.extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - }, - MdnsEvent::Expired(_) => {} - } + self.pending_events + .extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) + } + }, + MdnsEvent::Expired(_) => {}, }, NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), NetworkBehaviourAction::DialPeer { peer_id, condition } => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - NetworkBehaviourAction::NotifyHandler { event, .. } => - match event {}, // `event` is an enum with no variant + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match event {}, /* `event` is an enum with no variant */ NetworkBehaviourAction::ReportObservedAddr { address, score } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), } } @@ -839,15 +905,14 @@ impl MdnsWrapper { ) -> Poll> { loop { match self { - MdnsWrapper::Instantiating(fut) => { + MdnsWrapper::Instantiating(fut) => *self = match futures::ready!(fut.as_mut().poll(cx)) { Ok(mdns) => MdnsWrapper::Ready(mdns), Err(err) => { warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); MdnsWrapper::Disabled }, - } - } + }, MdnsWrapper::Ready(mdns) => return mdns.poll(cx, params), MdnsWrapper::Disabled => return Poll::Pending, } @@ -857,17 +922,20 @@ impl MdnsWrapper { #[cfg(test)] mod tests { + use super::{protocol_name_from_protocol_id, DiscoveryConfig, DiscoveryOut}; use crate::config::ProtocolId; use futures::prelude::*; - use libp2p::identity::Keypair; - use libp2p::{Multiaddr, PeerId}; - use libp2p::core::upgrade; - use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::noise; - use libp2p::swarm::Swarm; - use libp2p::yamux; + use libp2p::{ + core::{ + transport::{MemoryTransport, Transport}, + upgrade, + }, + identity::Keypair, + noise, + swarm::Swarm, + yamux, Multiaddr, PeerId, + }; use std::{collections::HashSet, task::Poll}; - use super::{DiscoveryConfig, DiscoveryOut, protocol_name_from_protocol_id}; #[test] fn discovery_working() { @@ -876,50 +944,56 @@ mod tests { // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of // the first swarm via `with_user_defined`. - let mut swarms = (0..25).map(|i| { - let keypair = Keypair::generate_ed25519(); - - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); - - let transport = MemoryTransport - .upgrade(upgrade::Version::V1) - .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) - .multiplex(yamux::YamuxConfig::default()) - .boxed(); - - let behaviour = { - let mut config = DiscoveryConfig::new(keypair.public()); - config.with_user_defined(first_swarm_peer_id_and_addr.clone()) - .allow_private_ipv4(true) - .allow_non_globals_in_dht(true) - .discovery_limit(50) - .add_protocol(protocol_id.clone()); - - config.finish() - }; + let mut swarms = (0..25) + .map(|i| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = + noise::Keypair::::new().into_authentic(&keypair).unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::YamuxConfig::default()) + .boxed(); + + let behaviour = { + let mut config = DiscoveryConfig::new(keypair.public()); + config + .with_user_defined(first_swarm_peer_id_and_addr.clone()) + .allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_id.clone()); + + config.finish() + }; - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = + format!("/memory/{}", rand::random::()).parse().unwrap(); - if i == 0 { - first_swarm_peer_id_and_addr = Some((keypair.public().into_peer_id(), listen_addr.clone())) - } + if i == 0 { + first_swarm_peer_id_and_addr = + Some((keypair.public().into_peer_id(), 
listen_addr.clone())) + } - swarm.listen_on(listen_addr.clone()).unwrap(); - (swarm, listen_addr) - }).collect::>(); + swarm.listen_on(listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); // Build a `Vec>` with the list of nodes remaining to be discovered. - let mut to_discover = (0..swarms.len()).map(|n| { - (0..swarms.len()) - // Skip the first swarm as all other swarms already know it. - .skip(1) - .filter(|p| *p != n) - .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) - .collect::>() - }).collect::>(); + let mut to_discover = (0..swarms.len()) + .map(|n| { + (0..swarms.len()) + // Skip the first swarm as all other swarms already know it. + .skip(1) + .filter(|p| *p != n) + .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) + .collect::>() + }) + .collect::>(); let fut = futures::future::poll_fn(move |cx| { 'polling: loop { @@ -927,13 +1001,17 @@ mod tests { match swarms[swarm_n].0.poll_next_unpin(cx) { Poll::Ready(Some(e)) => { match e { - DiscoveryOut::UnroutablePeer(other) | DiscoveryOut::Discovered(other) => { + DiscoveryOut::UnroutablePeer(other) | + DiscoveryOut::Discovered(other) => { // Call `add_self_reported_address` to simulate identify happening. - let addr = swarms.iter().find_map(|(s, a)| - if s.behaviour().local_peer_id == other { - Some(a.clone()) - } else { - None + let addr = swarms + .iter() + .find_map(|(s, a)| { + if s.behaviour().local_peer_id == other { + Some(a.clone()) + } else { + None + } }) .unwrap(); swarms[swarm_n].0.behaviour_mut().add_self_reported_address( @@ -945,11 +1023,13 @@ mod tests { to_discover[swarm_n].remove(&other); }, DiscoveryOut::RandomKademliaStarted(_) => {}, - e => {panic!("Unexpected event: {:?}", e)}, + e => { + panic!("Unexpected event: {:?}", e) + }, } continue 'polling - } - _ => {} + }, + _ => {}, } } break @@ -973,7 +1053,8 @@ mod tests { let mut discovery = { let keypair = Keypair::generate_ed25519(); let mut config = DiscoveryConfig::new(keypair.public()); - config.allow_private_ipv4(true) + config + .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) .add_protocol(supported_protocol_id.clone()); @@ -992,7 +1073,8 @@ mod tests { for kademlia in discovery.kademlias.values_mut() { assert!( - kademlia.kbucket(remote_peer_id.clone()) + kademlia + .kbucket(remote_peer_id.clone()) .expect("Remote peer id not to be equal to local peer id.") .is_empty(), "Expect peer with unsupported protocol not to be added." @@ -1009,7 +1091,8 @@ mod tests { for kademlia in discovery.kademlias.values_mut() { assert_eq!( 1, - kademlia.kbucket(remote_peer_id.clone()) + kademlia + .kbucket(remote_peer_id.clone()) .expect("Remote peer id not to be equal to local peer id.") .num_entries(), "Expect peer with supported protocol to be added." 
@@ -1025,7 +1108,8 @@ mod tests { let mut discovery = { let keypair = Keypair::generate_ed25519(); let mut config = DiscoveryConfig::new(keypair.public()); - config.allow_private_ipv4(true) + config + .allow_private_ipv4(true) .allow_non_globals_in_dht(true) .discovery_limit(50) .add_protocol(protocol_a.clone()) @@ -1045,17 +1129,20 @@ mod tests { assert_eq!( 1, - discovery.kademlias.get_mut(&protocol_a) + discovery + .kademlias + .get_mut(&protocol_a) .expect("Kademlia instance to exist.") .kbucket(remote_peer_id.clone()) .expect("Remote peer id not to be equal to local peer id.") .num_entries(), "Expected remote peer to be added to `protocol_a` Kademlia instance.", - ); assert!( - discovery.kademlias.get_mut(&protocol_b) + discovery + .kademlias + .get_mut(&protocol_b) .expect("Kademlia instance to exist.") .kbucket(remote_peer_id.clone()) .expect("Remote peer id not to be equal to local peer id.") diff --git a/substrate/client/network/src/error.rs b/substrate/client/network/src/error.rs index 2a226b58b46a5e488e27134c0aeaf2a4c2fd7539..32fc6f9e1e31c8f6b139b0a2a11b300ed382b33f 100644 --- a/substrate/client/network/src/error.rs +++ b/substrate/client/network/src/error.rs @@ -19,7 +19,7 @@ //! Substrate network possible errors. use crate::config::TransportConfig; -use libp2p::{PeerId, Multiaddr}; +use libp2p::{Multiaddr, PeerId}; use std::{borrow::Cow, fmt}; @@ -38,7 +38,7 @@ pub enum Error { fmt = "The same bootnode (`{}`) is registered with two different peer ids: `{}` and `{}`", address, first_id, - second_id, + second_id )] DuplicateBootnode { /// The address of the bootnode. @@ -53,7 +53,7 @@ pub enum Error { /// The network addresses are invalid because they don't match the transport. #[display( fmt = "The following addresses are invalid because they don't match the transport: {:?}", - addresses, + addresses )] AddressesForAnotherTransport { /// Transport used. diff --git a/substrate/client/network/src/lib.rs b/substrate/client/network/src/lib.rs index b43836cacaa5408e1de6c93cfdbb5507663d45b2..c812390ec6a65fc27e667fe0668fbe784d7d6690 100644 --- a/substrate/client/network/src/lib.rs +++ b/substrate/client/network/src/lib.rs @@ -243,13 +243,12 @@ //! - Calling `trigger_repropagate` when a transaction is added to the pool. //! //! More precise usage details are still being worked on and will likely change in the future. -//! 
mod behaviour; mod chain; -mod peer_info; mod discovery; mod on_demand_layer; +mod peer_info; mod protocol; mod request_responses; mod schema; @@ -257,22 +256,25 @@ mod service; mod transport; mod utils; -pub mod block_request_handler; pub mod bitswap; -pub mod light_client_requests; -pub mod state_request_handler; +pub mod block_request_handler; pub mod config; pub mod error; +pub mod light_client_requests; pub mod network_state; +pub mod state_request_handler; pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; -pub use protocol::{event::{DhtEvent, Event, ObservedRole}, PeerInfo}; -pub use protocol::sync::{SyncState, StateDownloadProgress}; +pub use protocol::{ + event::{DhtEvent, Event, ObservedRole}, + sync::{StateDownloadProgress, SyncState}, + PeerInfo, +}; pub use service::{ - NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender, - NotificationSenderReady, IfDisconnected, + IfDisconnected, NetworkService, NetworkWorker, NotificationSender, NotificationSenderReady, + OutboundFailure, RequestFailure, }; pub use sc_peerset::ReputationChange; diff --git a/substrate/client/network/src/light_client_requests.rs b/substrate/client/network/src/light_client_requests.rs index f859a35f45b2461fdf5b0311357c82aa40283127..8489585e28831674ee051efca01c1d416ede4e06 100644 --- a/substrate/client/network/src/light_client_requests.rs +++ b/substrate/client/network/src/light_client_requests.rs @@ -18,13 +18,12 @@ //! Helpers for outgoing and incoming light client requests. -/// For outgoing light client requests. -pub mod sender; /// For incoming light client requests. pub mod handler; +/// For outgoing light client requests. +pub mod sender; -use crate::config::ProtocolId; -use crate::request_responses::ProtocolConfig; +use crate::{config::ProtocolId, request_responses::ProtocolConfig}; use std::time::Duration; @@ -51,24 +50,30 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { #[cfg(test)] mod tests { use super::*; - use crate::request_responses::IncomingRequest; - use crate::config::ProtocolId; + use crate::{config::ProtocolId, request_responses::IncomingRequest}; use assert_matches::assert_matches; - use futures::executor::{block_on, LocalPool}; - use futures::task::Spawn; - use futures::{channel::oneshot, prelude::*}; + use futures::{ + channel::oneshot, + executor::{block_on, LocalPool}, + prelude::*, + task::Spawn, + }; use libp2p::PeerId; - use sc_client_api::StorageProof; - use sc_client_api::light::{RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest}; - use sc_client_api::light::{self, RemoteReadRequest, RemoteBodyRequest, ChangesProof}; - use sc_client_api::{FetchChecker, RemoteReadChildRequest}; + use sc_client_api::{ + light::{ + self, ChangesProof, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, + RemoteHeaderRequest, RemoteReadRequest, + }, + FetchChecker, RemoteReadChildRequest, StorageProof, + }; use sp_blockchain::Error as ClientError; use sp_core::storage::ChildInfo; - use sp_runtime::generic::Header; - use sp_runtime::traits::{BlakeTwo256, Block as BlockT, NumberFor}; - use std::collections::HashMap; - use std::sync::Arc; + use sp_runtime::{ + generic::Header, + traits::{BlakeTwo256, Block as BlockT, NumberFor}, + }; + use std::{collections::HashMap, sync::Arc}; pub struct DummyFetchChecker { pub ok: bool, @@ -94,12 +99,7 @@ mod tests { _: StorageProof, ) -> Result, Option>>, ClientError> { match self.ok { - true => Ok(request - .keys - .iter() - .cloned() - .map(|k| (k, 
Some(vec![42]))) - .collect()), + true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), false => Err(ClientError::Backend("Test error".into())), } } @@ -110,12 +110,7 @@ mod tests { _: StorageProof, ) -> Result, Option>>, ClientError> { match self.ok { - true => Ok(request - .keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect()), + true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), false => Err(ClientError::Backend("Test error".into())), } } @@ -184,7 +179,8 @@ mod tests { fn send_receive(request: sender::Request, pool: &LocalPool) { let client = Arc::new(substrate_test_runtime_client::new()); - let (handler, protocol_config) = handler::LightClientRequestHandler::new(&protocol_id(), client); + let (handler, protocol_config) = + handler::LightClientRequestHandler::new(&protocol_id(), client); pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); let (_peer_set, peer_set_handle) = peerset(); @@ -199,18 +195,28 @@ mod tests { sender.inject_connected(PeerId::random()); sender.request(request).unwrap(); - let sender::OutEvent::SendRequest { pending_response, request, .. } = block_on(sender.next()).unwrap(); + let sender::OutEvent::SendRequest { pending_response, request, .. } = + block_on(sender.next()).unwrap(); let (tx, rx) = oneshot::channel(); block_on(protocol_config.inbound_queue.unwrap().send(IncomingRequest { peer: PeerId::random(), payload: request, pending_response: tx, - })).unwrap(); - pool.spawner().spawn_obj(async move { - pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); - }.boxed().into()).unwrap(); - - pool.spawner().spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()).unwrap(); + })) + .unwrap(); + pool.spawner() + .spawn_obj( + async move { + pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); + } + .boxed() + .into(), + ) + .unwrap(); + + pool.spawner() + .spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()) + .unwrap(); } #[test] @@ -225,10 +231,7 @@ mod tests { }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Call { - request, - sender: chan.0, - }, &pool); + send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_execution_proof` } @@ -243,17 +246,10 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Read { - request, - sender: chan.0, - }, &pool); + send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); assert_eq!( Some(vec![42]), - pool.run_until(chan.1) - .unwrap() - .unwrap() - .remove(&b":key"[..]) - .unwrap() + pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() ); // ^--- from `DummyFetchChecker::check_read_proof` } @@ -270,17 +266,10 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::ReadChild { - request, - sender: chan.0, - }, &pool); + send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); assert_eq!( Some(vec![42]), - pool.run_until(chan.1) - .unwrap() - .unwrap() - .remove(&b":key"[..]) - .unwrap() + pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() ); // ^--- from `DummyFetchChecker::check_read_child_proof` } @@ -295,15 +284,9 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Header { - request, - sender: chan.0, - }, &pool); + 
send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); // The remote does not know block 1: - assert_matches!( - pool.run_until(chan.1).unwrap(), - Err(ClientError::RemoteFetchFailed) - ); + assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); } #[test] @@ -324,10 +307,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Changes { - request, - sender: chan.0, - }, &pool); + send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_changes_proof` } diff --git a/substrate/client/network/src/light_client_requests/handler.rs b/substrate/client/network/src/light_client_requests/handler.rs index 1cfae0a3cb1dffd460037f3349902066305aee3a..609ed35f4a9d1fd60daad5bdf95aaeb7a558ab0d 100644 --- a/substrate/client/network/src/light_client_requests/handler.rs +++ b/substrate/client/network/src/light_client_requests/handler.rs @@ -22,34 +22,27 @@ //! [`crate::request_responses::RequestResponsesBehaviour`] with //! [`LightClientRequestHandler`](handler::LightClientRequestHandler). -use codec::{self, Encode, Decode}; use crate::{ chain::Client, config::ProtocolId, - schema, - PeerId, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema, PeerId, }; -use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; -use futures::{channel::mpsc, prelude::*}; +use codec::{self, Decode, Encode}; +use futures::{channel::mpsc, prelude::*}; +use log::{debug, trace}; use prost::Message; -use sc_client_api::{ - StorageProof, - light -}; +use sc_client_api::{light, StorageProof}; use sc_peerset::ReputationChange; use sp_core::{ - storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, hexdisplay::HexDisplay, + storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageKey}, }; use sp_runtime::{ - traits::{Block, Zero}, generic::BlockId, + traits::{Block, Zero}, }; -use std::{ - collections::{BTreeMap}, - sync::Arc, -}; -use log::{trace, debug}; +use std::{collections::BTreeMap, sync::Arc}; const LOG_TARGET: &str = "light-client-request-handler"; @@ -62,10 +55,7 @@ pub struct LightClientRequestHandler { impl LightClientRequestHandler { /// Create a new [`crate::block_request_handler::BlockRequestHandler`]. - pub fn new( - protocol_id: &ProtocolId, - client: Arc>, - ) -> (Self, ProtocolConfig) { + pub fn new(protocol_id: &ProtocolId, client: Arc>) -> (Self, ProtocolConfig) { // For now due to lack of data on light client request handling in production systems, this // value is chosen to match the block request limit. 
let (tx, request_receiver) = mpsc::channel(20); @@ -86,7 +76,7 @@ impl LightClientRequestHandler { let response = OutgoingResponse { result: Ok(response_data), reputation_changes: Vec::new(), - sent_feedback: None + sent_feedback: None, }; match pending_response.send(response) { @@ -98,35 +88,36 @@ impl LightClientRequestHandler { Err(_) => debug!( target: LOG_TARGET, "Failed to handle light client request from {}: {}", - peer, HandleRequestError::SendResponse, + peer, + HandleRequestError::SendResponse, ), }; - } , + }, Err(e) => { debug!( target: LOG_TARGET, - "Failed to handle light client request from {}: {}", - peer, e, + "Failed to handle light client request from {}: {}", peer, e, ); let reputation_changes = match e { HandleRequestError::BadRequest(_) => { vec![ReputationChange::new(-(1 << 12), "bad request")] - } + }, _ => Vec::new(), }; let response = OutgoingResponse { result: Err(()), reputation_changes, - sent_feedback: None + sent_feedback: None, }; if pending_response.send(response).is_err() { debug!( target: LOG_TARGET, "Failed to handle light client request from {}: {}", - peer, HandleRequestError::SendResponse, + peer, + HandleRequestError::SendResponse, ); }; }, @@ -134,7 +125,6 @@ impl LightClientRequestHandler { } } - fn handle_request( &mut self, peer: PeerId, @@ -153,9 +143,8 @@ impl LightClientRequestHandler { self.on_remote_read_child_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => self.on_remote_changes_request(&peer, r)?, - None => { - return Err(HandleRequestError::BadRequest("Remote request without request data.")); - } + None => + return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; let mut data = Vec::new(); @@ -171,24 +160,30 @@ impl LightClientRequestHandler { ) -> Result { log::trace!( "Remote call request from {} ({} at {:?}).", - peer, request.method, request.block, + peer, + request.method, + request.block, ); let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.client.execution_proof( - &BlockId::Hash(block), - &request.method, &request.data, - ) { - Ok((_, proof)) => proof, - Err(e) => { - log::trace!( - "remote call request from {} ({} at {:?}) failed with: {}", - peer, request.method, request.block, e, - ); - StorageProof::empty() - } - }; + let proof = + match self + .client + .execution_proof(&BlockId::Hash(block), &request.method, &request.data) + { + Ok((_, proof)) => proof, + Err(e) => { + log::trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + StorageProof::empty() + }, + }; let response = { let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; @@ -210,23 +205,28 @@ impl LightClientRequestHandler { log::trace!( "Remote read request from {} ({} at {:?}).", - peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, ); let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.client.read_proof( - &BlockId::Hash(block), - &mut request.keys.iter().map(AsRef::as_ref), - ) { + let proof = match self + .client + .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) + { Ok(proof) => proof, Err(error) => { log::trace!( "remote read request from {} ({} at {:?}) failed with: {}", - peer, fmt_keys(request.keys.first(), request.keys.last()), request.block, error, + peer, + fmt_keys(request.keys.first(), 
request.keys.last()), + request.block, + error, ); StorageProof::empty() - } + }, }; let response = { @@ -262,11 +262,13 @@ impl LightClientRequestHandler { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err(sp_blockchain::Error::InvalidChildStorageKey), }; - let proof = match child_info.and_then(|child_info| self.client.read_child_proof( - &BlockId::Hash(block), - &child_info, - &mut request.keys.iter().map(AsRef::as_ref) - )) { + let proof = match child_info.and_then(|child_info| { + self.client.read_child_proof( + &BlockId::Hash(block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref), + ) + }) { Ok(proof) => proof, Err(error) => { log::trace!( @@ -278,7 +280,7 @@ impl LightClientRequestHandler { error, ); StorageProof::empty() - } + }, }; let response = { @@ -302,10 +304,12 @@ impl LightClientRequestHandler { Err(error) => { log::trace!( "Remote header proof request from {} ({:?}) failed with: {}.", - peer, request.block, error + peer, + request.block, + error ); (Default::default(), StorageProof::empty()) - } + }, }; let response = { @@ -325,7 +329,11 @@ impl LightClientRequestHandler { "Remote changes proof request from {} for key {} ({:?}..{:?}).", peer, if !request.storage_key.is_empty() { - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) + format!( + "{} : {}", + HexDisplay::from(&request.storage_key), + HexDisplay::from(&request.key) + ) } else { HexDisplay::from(&request.key).to_string() }, @@ -344,10 +352,11 @@ impl LightClientRequestHandler { Some(PrefixedStorageKey::new_ref(&request.storage_key)) }; - let proof = match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { - Ok(proof) => proof, - Err(error) => { - log::trace!( + let proof = + match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { + Ok(proof) => proof, + Err(error) => { + log::trace!( "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", peer, format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), @@ -356,20 +365,22 @@ impl LightClientRequestHandler { error, ); - light::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; + light::ChangesProof:: { + max_block: Zero::zero(), + proof: Vec::new(), + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + }, + }; let response = { let r = schema::v1::light::RemoteChangesResponse { max: proof.max_block.encode(), proof: proof.proof, - roots: proof.roots.into_iter() + roots: proof + .roots + .into_iter() .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) .collect(), roots_proof: proof.roots_proof.encode(), diff --git a/substrate/client/network/src/light_client_requests/sender.rs b/substrate/client/network/src/light_client_requests/sender.rs index 77efa1b982e7a59ebb308c8c485c2c0982482945..2320d3bcb678c4aec1a2e6bab4d208b035c30725 100644 --- a/substrate/client/network/src/light_client_requests/sender.rs +++ b/substrate/client/network/src/light_client_requests/sender.rs @@ -29,28 +29,21 @@ //! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`] provided earlier //! with [`LightClientRequestSender::request`](sender::LightClientRequestSender::request). 
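The request flow sketched in the module docs above is a oneshot-based handshake: the caller keeps the `Receiver`, while the `Sender` travels inside the `Request` until a checked response (or an error) is pushed back through it. The following sketch is an editorial illustration of that pattern using only the `futures` crate; the names (`Reply`, `reply_tx`, `reply_rx`) are invented for the example and are not part of this module's API.

use futures::{channel::oneshot, executor::block_on};

// Illustrative stand-in for a checked light-client reply.
type Reply = Result<Vec<u8>, String>;

fn main() {
    // The requester creates a oneshot channel and ships the sending half
    // along with its request (cf. `LightClientRequestSender::request`).
    let (reply_tx, reply_rx) = oneshot::channel::<Reply>();

    // Elsewhere, once the peer's response has been decoded and checked,
    // the stored sender is consumed exactly once to deliver the result.
    std::thread::spawn(move || {
        let _ = reply_tx.send(Ok(b"checked reply".to_vec()));
    });

    // The requester simply awaits the receiving half.
    let reply = block_on(reply_rx).expect("sender dropped without replying");
    assert_eq!(reply, Ok(b"checked reply".to_vec()));
}

In the code below, the sending half ends up stored in one of the `Request::*` variants (see the `sender` fields on `Request` further down) and is consumed by `Request::return_reply` once response checking has finished.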
-use codec::{self, Encode, Decode}; use crate::{ config::ProtocolId, - protocol::message::{BlockAttributes}, - schema, - PeerId, + protocol::message::BlockAttributes, + request_responses::{OutboundFailure, RequestFailure}, + schema, PeerId, }; -use crate::request_responses::{RequestFailure, OutboundFailure}; -use futures::{channel::{oneshot}, future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use codec::{self, Decode, Encode}; +use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; use prost::Message; -use sc_client_api::{ - light::{ - self, RemoteBodyRequest, - } -}; +use sc_client_api::light::{self, RemoteBodyRequest}; use sc_peerset::ReputationChange; -use sp_blockchain::{Error as ClientError}; -use sp_runtime::{ - traits::{Block, Header, NumberFor}, -}; +use sp_blockchain::Error as ClientError; +use sp_runtime::traits::{Block, Header, NumberFor}; use std::{ - collections::{BTreeMap, VecDeque, HashMap}, + collections::{BTreeMap, HashMap, VecDeque}, pin::Pin, sync::Arc, task::{Context, Poll}, @@ -60,9 +53,11 @@ mod rep { use super::*; /// Reputation change for a peer when a request timed out. - pub const TIMEOUT: ReputationChange = ReputationChange::new(-(1 << 8), "light client request timeout"); + pub const TIMEOUT: ReputationChange = + ReputationChange::new(-(1 << 8), "light client request timeout"); /// Reputation change for a peer when a request is refused. - pub const REFUSED: ReputationChange = ReputationChange::new(-(1 << 8), "light client request refused"); + pub const REFUSED: ReputationChange = + ReputationChange::new(-(1 << 8), "light client request refused"); } /// Configuration options for [`LightClientRequestSender`]. @@ -95,9 +90,12 @@ pub struct LightClientRequestSender { /// Pending (local) requests. pending_requests: VecDeque>, /// Requests on their way to remote peers. - sent_requests: FuturesUnordered, Result, RequestFailure>, oneshot::Canceled>), - >>, + sent_requests: FuturesUnordered< + BoxFuture< + 'static, + (SentRequest, Result, RequestFailure>, oneshot::Canceled>), + >, + >, /// Handle to use for reporting misbehaviour of peers. peerset: sc_peerset::PeersetHandle, } @@ -121,11 +119,7 @@ impl PendingRequest { } fn into_sent(self, peer_id: PeerId) -> SentRequest { - SentRequest { - attempts_left: self.attempts_left, - request: self.request, - peer: peer_id, - } + SentRequest { attempts_left: self.attempts_left, request: self.request, peer: peer_id } } } @@ -142,10 +136,7 @@ struct SentRequest { impl SentRequest { fn into_pending(self) -> PendingRequest { - PendingRequest { - attempts_left: self.attempts_left, - request: self.request, - } + PendingRequest { attempts_left: self.attempts_left, request: self.request } } } @@ -206,7 +197,7 @@ where peer: PeerId, request: &Request, response: Response, - ) -> Result, Error> { + ) -> Result, Error> { log::trace!("response from {}", peer); match response { Response::Light(r) => self.on_response_light(request, r), @@ -222,27 +213,26 @@ where use schema::v1::light::response::Response; match response.response { Some(Response::RemoteCallResponse(response)) => - if let Request::Call { request , .. } = request { + if let Request::Call { request, .. } = request { let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_execution_proof(request, proof)?; Ok(Reply::VecU8(reply)) } else { Err(Error::UnexpectedResponse) - } - Some(Response::RemoteReadResponse(response)) => - match request { - Request::Read { request, .. 
} => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - _ => Err(Error::UnexpectedResponse) - } + }, + Some(Response::RemoteReadResponse(response)) => match request { + Request::Read { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + }, + Request::ReadChild { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_child_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + }, + _ => Err(Error::UnexpectedResponse), + }, Some(Response::RemoteChangesResponse(response)) => if let Request::Changes { request, .. } = request { let max_block = Decode::decode(&mut response.max.as_ref())?; @@ -256,31 +246,33 @@ where } r }; - let reply = self.checker.check_changes_proof(&request, light::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - })?; + let reply = self.checker.check_changes_proof( + &request, + light::ChangesProof { + max_block, + proof: response.proof, + roots, + roots_proof, + }, + )?; Ok(Reply::VecNumberU32(reply)) } else { Err(Error::UnexpectedResponse) - } + }, Some(Response::RemoteHeaderResponse(response)) => if let Request::Header { request, .. } = request { - let header = - if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) - }; + let header = if response.header.is_empty() { + None + } else { + Some(Decode::decode(&mut response.header.as_ref())?) + }; let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_header_proof(&request, header, proof)?; Ok(Reply::Header(reply)) } else { Err(Error::UnexpectedResponse) - } - None => Err(Error::UnexpectedResponse) + }, + None => Err(Error::UnexpectedResponse), } } @@ -289,10 +281,10 @@ where request: &Request, response: schema::v1::BlockResponse, ) -> Result, Error> { - let request = if let Request::Body { request , .. } = &request { + let request = if let Request::Body { request, .. } = &request { request } else { - return Err(Error::UnexpectedResponse); + return Err(Error::UnexpectedResponse) }; let body: Vec<_> = match response.blocks.into_iter().next() { @@ -300,7 +292,8 @@ where None => return Err(Error::UnexpectedResponse), }; - let body = body.into_iter() + let body = body + .into_iter() .map(|extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) .collect::>()?; @@ -323,13 +316,14 @@ where } } - impl Stream for LightClientRequestSender { type Item = OutEvent; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { // If we have received responses to previously sent requests, check them and pass them on. 
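// Editorial note, not in the original source: `sent_requests` is a
// `FuturesUnordered`, so the `while let` below drains every response future
// that has completed since the last poll; the loop ends when `poll_next_unpin`
// returns `Poll::Pending` (responses still in flight) or `Poll::Ready(None)`
// (no sent requests outstanding).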
- while let Poll::Ready(Some((sent_request, request_result))) = self.sent_requests.poll_next_unpin(cx) { + while let Poll::Ready(Some((sent_request, request_result))) = + self.sent_requests.poll_next_unpin(cx) + { if let Some(info) = self.peers.get_mut(&sent_request.peer) { if info.status != PeerStatus::Busy { // If we get here, something is wrong with our internal handling of peer status @@ -347,30 +341,38 @@ impl Stream for LightClientRequestSender { Err(oneshot::Canceled) => { log::debug!("Oneshot for request to peer {} was canceled.", sent_request.peer); self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, ReputationChange::new_fatal("no response from peer")); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("no response from peer"), + ); self.pending_requests.push_back(sent_request.into_pending()); - continue; - } + continue + }, }; let decoded_request_result = request_result.map(|response| { if sent_request.request.is_block_request() { - schema::v1::BlockResponse::decode(&response[..]) - .map(|r| Response::Block(r)) + schema::v1::BlockResponse::decode(&response[..]).map(|r| Response::Block(r)) } else { - schema::v1::light::Response::decode(&response[..]) - .map(|r| Response::Light(r)) + schema::v1::light::Response::decode(&response[..]).map(|r| Response::Light(r)) } }); let response = match decoded_request_result { Ok(Ok(response)) => response, Ok(Err(e)) => { - log::debug!("Failed to decode response from peer {}: {:?}.", sent_request.peer, e); + log::debug!( + "Failed to decode response from peer {}: {:?}.", + sent_request.peer, + e + ); self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, ReputationChange::new_fatal("invalid response from peer")); + self.peerset.report_peer( + sent_request.peer, + ReputationChange::new_fatal("invalid response from peer"), + ); self.pending_requests.push_back(sent_request.into_pending()); - continue; + continue }, Err(e) => { log::debug!("Request to peer {} failed with {:?}.", sent_request.peer, e); @@ -379,22 +381,19 @@ impl Stream for LightClientRequestSender { RequestFailure::NotConnected => { self.remove_peer(sent_request.peer); self.pending_requests.push_back(sent_request.into_pending()); - } + }, RequestFailure::UnknownProtocol => { debug_assert!( false, "Light client and block request protocol should be known when \ sending requests.", ); - } + }, RequestFailure::Refused => { self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - rep::REFUSED, - ); + self.peerset.report_peer(sent_request.peer, rep::REFUSED); self.pending_requests.push_back(sent_request.into_pending()); - } + }, RequestFailure::Obsolete => { debug_assert!( false, @@ -402,13 +401,10 @@ impl Stream for LightClientRequestSender { response receiver.", ); self.pending_requests.push_back(sent_request.into_pending()); - } + }, RequestFailure::Network(OutboundFailure::Timeout) => { self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - rep::TIMEOUT, - ); + self.peerset.report_peer(sent_request.peer, rep::TIMEOUT); self.pending_requests.push_back(sent_request.into_pending()); }, RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { @@ -420,31 +416,27 @@ impl Stream for LightClientRequestSender { ), ); self.pending_requests.push_back(sent_request.into_pending()); - } + }, RequestFailure::Network(OutboundFailure::DialFailure) => { self.remove_peer(sent_request.peer); self.peerset.report_peer( sent_request.peer, - 
ReputationChange::new_fatal( - "failed to dial peer", - ), + ReputationChange::new_fatal("failed to dial peer"), ); self.pending_requests.push_back(sent_request.into_pending()); - } + }, RequestFailure::Network(OutboundFailure::ConnectionClosed) => { self.remove_peer(sent_request.peer); self.peerset.report_peer( sent_request.peer, - ReputationChange::new_fatal( - "connection to peer closed", - ), + ReputationChange::new_fatal("connection to peer closed"), ); self.pending_requests.push_back(sent_request.into_pending()); - } + }, } - continue; - } + continue + }, }; match self.on_response(sent_request.peer, &sent_request.request, response) { @@ -454,23 +446,23 @@ impl Stream for LightClientRequestSender { self.remove_peer(sent_request.peer); self.peerset.report_peer( sent_request.peer, - ReputationChange::new_fatal( - "unexpected response from peer", - ), + ReputationChange::new_fatal("unexpected response from peer"), ); self.pending_requests.push_back(sent_request.into_pending()); - } + }, Err(other) => { - log::debug!("error handling response from peer {}: {}", sent_request.peer, other); + log::debug!( + "error handling response from peer {}: {}", + sent_request.peer, + other + ); self.remove_peer(sent_request.peer); self.peerset.report_peer( sent_request.peer, - ReputationChange::new_fatal( - "invalid response from peer", - ), + ReputationChange::new_fatal("invalid response from peer"), ); self.pending_requests.push_back(sent_request.into_pending()) - } + }, } } @@ -497,7 +489,7 @@ impl Stream for LightClientRequestSender { peer = Some((*peer_id, peer_info)); break }, - _ => peer = Some((*peer_id, peer_info)) + _ => peer = Some((*peer_id, peer_info)), } } } @@ -509,8 +501,8 @@ impl Stream for LightClientRequestSender { self.pending_requests.push_front(pending_request); log::debug!("No peer available to send request to."); - break; - } + break + }, }; let request_bytes = match pending_request.request.serialize_request() { @@ -519,7 +511,7 @@ impl Stream for LightClientRequestSender { log::debug!("failed to serialize request: {}", error); pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); continue - } + }, }; let (tx, rx) = oneshot::channel(); @@ -528,16 +520,15 @@ impl Stream for LightClientRequestSender { pending_request.attempts_left -= 1; - self.sent_requests.push(async move { - (pending_request.into_sent(peer_id), rx.await) - }.boxed()); + self.sent_requests + .push(async move { (pending_request.into_sent(peer_id), rx.await) }.boxed()); return Poll::Ready(Some(OutEvent::SendRequest { target: peer_id, request: request_bytes, pending_response: tx, protocol_name: protocol, - })); + })) } Poll::Pending @@ -557,7 +548,7 @@ pub enum OutEvent { pending_response: oneshot::Sender, RequestFailure>>, /// The name of the protocol to use to send the request. protocol_name: String, - } + }, } /// Incoming response from remote. @@ -592,7 +583,6 @@ enum Error { } /// The data to send back to the light client over the oneshot channel. -// // It is unified here in order to be able to return it as a function // result instead of delivering it to the client as a side effect of // response processing. @@ -605,7 +595,6 @@ enum Reply { Extrinsics(Vec), } - /// Information we have about some peer. 
#[derive(Debug)] struct PeerInfo { @@ -615,10 +604,7 @@ struct PeerInfo { impl Default for PeerInfo { fn default() -> Self { - PeerInfo { - best_block: None, - status: PeerStatus::Idle, - } + PeerInfo { best_block: None, status: PeerStatus::Idle } } } @@ -635,7 +621,6 @@ enum PeerStatus { /// /// The associated `oneshot::Sender` will be used to convey the result of /// their request back to them (cf. `Reply`). -// // This is modeled after light_dispatch.rs's `RequestData` which is not // used because we currently only support a subset of those. #[derive(Debug)] @@ -645,43 +630,43 @@ pub enum Request { /// Request. request: RemoteBodyRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>> + sender: oneshot::Sender, ClientError>>, }, /// Remote header request. Header { /// Request. request: light::RemoteHeaderRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender> + sender: oneshot::Sender>, }, /// Remote read request. Read { /// Request. request: light::RemoteReadRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>> + sender: oneshot::Sender, Option>>, ClientError>>, }, /// Remote read child request. ReadChild { /// Request. request: light::RemoteReadChildRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>> + sender: oneshot::Sender, Option>>, ClientError>>, }, /// Remote call request. Call { /// Request. request: light::RemoteCallRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>> + sender: oneshot::Sender, ClientError>>, }, /// Remote changes request. Changes { /// Request. request: light::RemoteChangesRequest, /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, u32)>, ClientError>> - } + sender: oneshot::Sender, u32)>, ClientError>>, + }, } impl Request { @@ -728,19 +713,19 @@ impl Request { let mut buf = Vec::with_capacity(rq.encoded_len()); rq.encode(&mut buf)?; - return Ok(buf); - } + return Ok(buf) + }, Request::Header { request, .. } => { let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; schema::v1::light::request::Request::RemoteHeaderRequest(r) - } + }, Request::Read { request, .. } => { let r = schema::v1::light::RemoteReadRequest { block: request.block.encode(), keys: request.keys.clone(), }; schema::v1::light::request::Request::RemoteReadRequest(r) - } + }, Request::ReadChild { request, .. } => { let r = schema::v1::light::RemoteReadChildRequest { block: request.block.encode(), @@ -748,7 +733,7 @@ impl Request { keys: request.keys.clone(), }; schema::v1::light::request::Request::RemoteReadChildRequest(r) - } + }, Request::Call { request, .. } => { let r = schema::v1::light::RemoteCallRequest { block: request.block.encode(), @@ -756,19 +741,22 @@ impl Request { data: request.call_data.clone(), }; schema::v1::light::request::Request::RemoteCallRequest(r) - } + }, Request::Changes { request, .. 
} => { let r = schema::v1::light::RemoteChangesRequest { first: request.first_block.1.encode(), last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().map(|s| s.into_inner()) + storage_key: request + .storage_key + .clone() + .map(|s| s.into_inner()) .unwrap_or_default(), key: request.key.clone(), }; schema::v1::light::request::Request::RemoteChangesRequest(r) - } + }, }; let rq = schema::v1::light::Request { request: Some(request) }; @@ -786,32 +774,35 @@ impl Request { Err(e) => send(Err(e), sender), Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - } + }, Request::Header { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), - } + reply => + log::error!("invalid reply for header request: {:?}, {:?}", reply, request), + }, Request::Read { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - } + }, Request::ReadChild { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), - } + reply => + log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), + }, Request::Call { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::VecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - } + }, Request::Changes { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), - } + reply => + log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), + }, } } } @@ -819,19 +810,17 @@ impl Request { #[cfg(test)] mod tests { use super::*; - use crate::light_client_requests::tests::{DummyFetchChecker, protocol_id, peerset, dummy_header}; - use crate::request_responses::OutboundFailure; + use crate::{ + light_client_requests::tests::{dummy_header, peerset, protocol_id, DummyFetchChecker}, + request_responses::OutboundFailure, + }; use assert_matches::assert_matches; - use futures::channel::oneshot; - use futures::executor::block_on; - use futures::poll; + use futures::{channel::oneshot, executor::block_on, poll}; use sc_client_api::StorageProof; use sp_core::storage::ChildInfo; - use sp_runtime::generic::Header; - use sp_runtime::traits::BlakeTwo256; - use std::collections::HashSet; - use std::iter::FromIterator; + use sp_runtime::{generic::Header, traits::BlakeTwo256}; + use std::{collections::HashSet, iter::FromIterator}; fn empty_proof() -> Vec { StorageProof::empty().encode() @@ -843,10 +832,7 @@ mod tests { let (_peer_set, peer_set_handle) = peerset(); let mut sender = LightClientRequestSender::::new( &protocol_id(), - Arc::new(DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), + Arc::new(DummyFetchChecker { ok: true, _mark: std::marker::PhantomData }), peer_set_handle, ); @@ -864,17 +850,15 @@ mod tests { fn body_request_fields_encoded_properly() { let (sender, _receiver) = 
oneshot::channel(); let request = Request::::Body { - request: RemoteBodyRequest { - header: dummy_header(), - retry_count: None, - }, + request: RemoteBodyRequest { header: dummy_header(), retry_count: None }, sender, }; let serialized_request = request.serialize_request().unwrap(); - let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); + let deserialized_request = + schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); assert!(BlockAttributes::from_be_u32(deserialized_request.fields) - .unwrap() - .contains(BlockAttributes::BODY)); + .unwrap() + .contains(BlockAttributes::BODY)); } #[test] @@ -916,29 +900,26 @@ mod tests { sender.request(Request::Call { request, sender: chan.0 }).unwrap(); assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); - let OutEvent::SendRequest { target, pending_response, .. } = block_on(sender.next()).unwrap(); - assert!( - target == peer0 || target == peer1, - "Expect request to originate from known peer.", - ); + let OutEvent::SendRequest { target, pending_response, .. } = + block_on(sender.next()).unwrap(); + assert!(target == peer0 || target == peer1, "Expect request to originate from known peer.",); // And we should have one busy peer. assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = sender - .peers - .iter() - .partition(|(_, info)| info.status == PeerStatus::Idle); - idle.len() == 1 - && busy.len() == 1 - && (idle[0].0 == &peer0 || busy[0].0 == &peer0) - && (idle[0].0 == &peer1 || busy[0].0 == &peer1) + let (idle, busy): (Vec<_>, Vec<_>) = + sender.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); + idle.len() == 1 && + busy.len() == 1 && (idle[0].0 == &peer0 || busy[0].0 == &peer0) && + (idle[0].0 == &peer1 || busy[0].0 == &peer1) }); assert_eq!(0, sender.pending_requests.len(), "Expect no pending request."); assert_eq!(1, sender.sent_requests.len(), "Expect one request to be sent."); // Report first attempt as timed out. - pending_response.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).unwrap(); + pending_response + .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) + .unwrap(); // Expect a new request to be issued. let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); @@ -948,13 +929,17 @@ mod tests { assert_eq!(1, sender.sent_requests.len(), "Expect new request to be issued."); // Report second attempt as timed out. 
- pending_response.send(Err(RequestFailure::Network(OutboundFailure::Timeout))).unwrap(); + pending_response + .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) + .unwrap(); assert_matches!( - block_on(async { poll!(sender.next()) }), Poll::Pending, + block_on(async { poll!(sender.next()) }), + Poll::Pending, "Expect sender to not issue another attempt.", ); assert_matches!( - block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed), + block_on(chan.1).unwrap(), + Err(ClientError::RemoteFetchFailed), "Expect request failure to be reported.", ); assert_eq!(0, sender.peers.len(), "Expect no peer to be left"); @@ -988,12 +973,7 @@ mod tests { call_data: vec![], retry_count: Some(1), }; - sender - .request(Request::Call { - request, - sender: chan.0, - }) - .unwrap(); + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); assert_eq!(0, sender.sent_requests.len(), "Expect zero sent requests."); @@ -1003,9 +983,7 @@ mod tests { assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); let response = { - let r = schema::v1::light::RemoteCallResponse { - proof: empty_proof(), - }; + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; let response = schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), }; @@ -1017,7 +995,8 @@ mod tests { pending_response.send(Ok(response)).unwrap(); assert_matches!( - block_on(async { poll!(sender.next()) }), Poll::Pending, + block_on(async { poll!(sender.next()) }), + Poll::Pending, "Expect sender to not issue another attempt, given that there is no peer left.", ); @@ -1050,12 +1029,7 @@ mod tests { call_data: vec![], retry_count: Some(1), }; - sender - .request(Request::Call { - request, - sender: chan.0, - }) - .unwrap(); + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); assert_eq!(1, sender.pending_requests.len()); assert_eq!(0, sender.sent_requests.len()); @@ -1064,9 +1038,7 @@ mod tests { assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); let response = { - let r = schema::v1::light::RemoteReadResponse { - proof: empty_proof(), - }; // Not a RemoteCallResponse! + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! let response = schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), }; @@ -1077,7 +1049,8 @@ mod tests { pending_response.send(Ok(response)).unwrap(); assert_matches!( - block_on(async { poll!(sender.next()) }), Poll::Pending, + block_on(async { poll!(sender.next()) }), + Poll::Pending, "Expect sender to not issue another attempt, given that there is no peer left.", ); @@ -1114,12 +1087,7 @@ mod tests { call_data: vec![], retry_count: Some(3), // Attempt up to three retries. 
}; - sender - .request(Request::Call { - request, - sender: chan.0, - }) - .unwrap(); + sender.request(Request::Call { request, sender: chan.0 }).unwrap(); assert_eq!(1, sender.pending_requests.len()); assert_eq!(0, sender.sent_requests.len()); @@ -1132,9 +1100,7 @@ mod tests { for (i, _peer) in peers.iter().enumerate() { // Construct an invalid response let response = { - let r = schema::v1::light::RemoteCallResponse { - proof: empty_proof(), - }; + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; let response = schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), }; @@ -1152,13 +1118,11 @@ mod tests { } else { // Last peer and last attempt. assert_matches!( - block_on(async { poll!(sender.next()) }), Poll::Pending, + block_on(async { poll!(sender.next()) }), + Poll::Pending, "Expect sender to not issue another attempt, given that there is no peer left.", ); - assert_matches!( - chan.1.try_recv(), - Ok(Some(Err(ClientError::RemoteFetchFailed))) - ) + assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) } } } @@ -1187,35 +1151,27 @@ mod tests { proof: empty_proof(), }; schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteHeaderResponse( - r, - )), + response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), } - } + }, Request::Read { .. } => { - let r = schema::v1::light::RemoteReadResponse { - proof: empty_proof(), - }; + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), } - } + }, Request::ReadChild { .. } => { - let r = schema::v1::light::RemoteReadResponse { - proof: empty_proof(), - }; + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), } - } + }, Request::Call { .. } => { - let r = schema::v1::light::RemoteCallResponse { - proof: empty_proof(), - }; + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), } - } + }, Request::Changes { .. 
} => { let r = schema::v1::light::RemoteChangesResponse { max: std::iter::repeat(1).take(32).collect(), @@ -1226,7 +1182,7 @@ mod tests { schema::v1::light::Response { response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), } - } + }, }; let response = { @@ -1245,7 +1201,8 @@ mod tests { pending_response.send(Ok(response)).unwrap(); assert_matches!( - block_on(async { poll!(sender.next()) }), Poll::Pending, + block_on(async { poll!(sender.next()) }), + Poll::Pending, "Expect sender to not issue another attempt, given that there is no peer left.", ); @@ -1263,10 +1220,7 @@ mod tests { call_data: vec![], retry_count: None, }; - issue_request(Request::Call { - request, - sender: chan.0, - }); + issue_request(Request::Call { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1279,10 +1233,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::Read { - request, - sender: chan.0, - }); + issue_request(Request::Read { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1297,10 +1248,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::ReadChild { - request, - sender: chan.0, - }); + issue_request(Request::ReadChild { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1312,10 +1260,7 @@ mod tests { block: 1, retry_count: None, }; - issue_request(Request::Header { - request, - sender: chan.0, - }); + issue_request(Request::Header { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1336,10 +1281,7 @@ mod tests { storage_key: None, retry_count: None, }; - issue_request(Request::Changes { - request, - sender: chan.0, - }); + issue_request(Request::Changes { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } } diff --git a/substrate/client/network/src/network_state.rs b/substrate/client/network/src/network_state.rs index 4ddfadda172e4793346da92f6acd215252e7cc60..3f3d0596f16a052d4469ca4835a79936bd0cb130 100644 --- a/substrate/client/network/src/network_state.rs +++ b/substrate/client/network/src/network_state.rs @@ -22,7 +22,10 @@ use libp2p::{core::ConnectedPoint, Multiaddr}; use serde::{Deserialize, Serialize}; -use std::{collections::{HashMap, HashSet}, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + time::Duration, +}; /// Returns general information about the networking. 
/// @@ -90,13 +93,9 @@ pub enum PeerEndpoint { impl From for PeerEndpoint { fn from(endpoint: ConnectedPoint) -> Self { match endpoint { - ConnectedPoint::Dialer { address } => - PeerEndpoint::Dialing(address), + ConnectedPoint::Dialer { address } => PeerEndpoint::Dialing(address), ConnectedPoint::Listener { local_addr, send_back_addr } => - PeerEndpoint::Listening { - local_addr, - send_back_addr - } + PeerEndpoint::Listening { local_addr, send_back_addr }, } } } diff --git a/substrate/client/network/src/on_demand_layer.rs b/substrate/client/network/src/on_demand_layer.rs index ef8076e8cbed7988c4c262dc79065472a59699fd..ebcf012c0faef446d171b66808f2dfcc7cb7d15d 100644 --- a/substrate/client/network/src/on_demand_layer.rs +++ b/substrate/client/network/src/on_demand_layer.rs @@ -23,13 +23,19 @@ use crate::light_client_requests; use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; use sc_client_api::{ - FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, + StorageProof, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + collections::HashMap, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; /// Implements the `Fetcher` trait of the client. Makes it possible for the light client to perform /// network requests for some state. @@ -45,13 +51,13 @@ pub struct OnDemand { /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in /// asynchronous Rust at the moment - requests_queue: Mutex>>>, + requests_queue: + Mutex>>>, /// Sending side of `requests_queue`. requests_send: TracingUnboundedSender>, } - #[derive(Debug, thiserror::Error)] #[error("AlwaysBadChecker")] struct ErrorAlwaysBadChecker; @@ -83,7 +89,7 @@ impl FetchChecker for AlwaysBadChecker { &self, _request: &RemoteReadRequest, _remote_proof: StorageProof, - ) -> Result,Option>>, ClientError> { + ) -> Result, Option>>, ClientError> { Err(ErrorAlwaysBadChecker.into()) } @@ -106,7 +112,7 @@ impl FetchChecker for AlwaysBadChecker { fn check_changes_proof( &self, _request: &RemoteChangesRequest, - _remote_proof: ChangesProof + _remote_proof: ChangesProof, ) -> Result, u32)>, ClientError> { Err(ErrorAlwaysBadChecker.into()) } @@ -114,7 +120,7 @@ impl FetchChecker for AlwaysBadChecker { fn check_body_proof( &self, _request: &RemoteBodyRequest, - _body: Vec + _body: Vec, ) -> Result, ClientError> { Err(ErrorAlwaysBadChecker.into()) } @@ -129,11 +135,7 @@ where let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); let requests_queue = Mutex::new(Some(requests_queue)); - OnDemand { - checker, - requests_queue, - requests_send, - } + OnDemand { checker, requests_queue, requests_send } } /// Get checker reference. 
@@ -148,9 +150,9 @@ where /// /// If this function returns `None`, that means that the receiver has already been extracted in /// the past, and therefore that something already handles the requests. - pub(crate) fn extract_receiver(&self) - -> Option>> - { + pub(crate) fn extract_receiver( + &self, + ) -> Option>> { self.requests_queue.lock().take() } } diff --git a/substrate/client/network/src/peer_info.rs b/substrate/client/network/src/peer_info.rs index 39bbd1d87046062a0075e30dc52fe9462117c77d..a123482be0727eaca43cddf74cefd090e786d39e 100644 --- a/substrate/client/network/src/peer_info.rs +++ b/substrate/client/network/src/peer_info.rs @@ -16,24 +16,33 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::Multiaddr; -use libp2p::core::connection::{ConnectionId, ListenerId}; -use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; -use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}; -use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; -use log::{debug, trace, error}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + either::EitherOutput, + ConnectedPoint, PeerId, PublicKey, + }, + identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}, + ping::{Ping, PingConfig, PingEvent, PingSuccess}, + swarm::{ + IntoProtocolsHandler, IntoProtocolsHandlerSelect, NetworkBehaviour, NetworkBehaviourAction, + PollParameters, ProtocolsHandler, + }, + Multiaddr, +}; +use log::{debug, error, trace}; use smallvec::SmallVec; -use std::{error, io}; -use std::collections::hash_map::Entry; -use std::pin::Pin; -use std::task::{Context, Poll}; -use std::time::Duration; +use std::{ + collections::hash_map::Entry, + error, io, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; -use crate::utils::interval; /// Time after we disconnect from a node before we purge its information from the cache. const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); @@ -70,21 +79,13 @@ impl NodeInfo { fn new(endpoint: ConnectedPoint) -> Self { let mut endpoints = SmallVec::new(); endpoints.push(endpoint); - NodeInfo { - info_expire: None, - endpoints, - client_version: None, - latest_ping: None, - } + NodeInfo { info_expire: None, endpoints, client_version: None, latest_ping: None } } } impl PeerInfoBehaviour { /// Builds a new `PeerInfoBehaviour`. 
- pub fn new( - user_agent: String, - local_public_key: PublicKey, - ) -> Self { + pub fn new(user_agent: String, local_public_key: PublicKey) -> Self { let identify = { let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key) .with_agent_version(user_agent); @@ -172,7 +173,7 @@ pub enum PeerInfoEvent { impl NetworkBehaviour for PeerInfoBehaviour { type ProtocolsHandler = IntoProtocolsHandlerSelect< ::ProtocolsHandler, - ::ProtocolsHandler + ::ProtocolsHandler, >; type OutEvent = PeerInfoEvent; @@ -191,13 +192,18 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_connected(peer_id); } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.ping.inject_connection_established(peer_id, conn, endpoint); self.identify.inject_connection_established(peer_id, conn, endpoint); match self.nodes_info.entry(peer_id.clone()) { Entry::Vacant(e) => { e.insert(NodeInfo::new(endpoint.clone())); - } + }, Entry::Occupied(e) => { let e = e.into_mut(); if e.info_expire.as_ref().map(|exp| *exp < Instant::now()).unwrap_or(false) { @@ -206,11 +212,16 @@ impl NetworkBehaviour for PeerInfoBehaviour { } e.info_expire = None; e.endpoints.push(endpoint.clone()); - } + }, } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.ping.inject_connection_closed(peer_id, conn, endpoint); self.identify.inject_connection_closed(peer_id, conn, endpoint); @@ -238,7 +249,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { &mut self, peer_id: PeerId, connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent + event: <::Handler as ProtocolsHandler>::OutEvent, ) { match event { EitherOutput::First(event) => self.ping.inject_event(peer_id, connection, event), @@ -246,7 +257,12 @@ impl NetworkBehaviour for PeerInfoBehaviour { } } - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { self.ping.inject_addr_reach_failure(peer_id, addr, error); self.identify.inject_addr_reach_failure(peer_id, addr, error); } @@ -300,7 +316,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { + >{ loop { match self.ping.poll(cx, params) { Poll::Pending => break, @@ -317,28 +333,29 @@ impl NetworkBehaviour for PeerInfoBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: EitherOutput::First(event) + event: EitherOutput::First(event), }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), } } loop { match self.identify.poll(cx, params) { Poll::Pending => break, - Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { - match event { - IdentifyEvent::Received { peer_id, info, .. 
} => { - self.handle_identify_report(&peer_id, &info); - let event = PeerInfoEvent::Identified { peer_id, info }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); - } - IdentifyEvent::Error { peer_id, error } => - debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), - IdentifyEvent::Pushed { .. } => {} - IdentifyEvent::Sent { .. } => {} - } + Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => match event { + IdentifyEvent::Received { peer_id, info, .. } => { + self.handle_identify_report(&peer_id, &info); + let event = PeerInfoEvent::Identified { peer_id, info }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + }, + IdentifyEvent::Error { peer_id, error } => + debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), + IdentifyEvent::Pushed { .. } => {}, + IdentifyEvent::Sent { .. } => {}, }, Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), @@ -348,10 +365,13 @@ impl NetworkBehaviour for PeerInfoBehaviour { return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, - event: EitherOutput::Second(event) + event: EitherOutput::Second(event), }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + score, + }), } } diff --git a/substrate/client/network/src/protocol.rs b/substrate/client/network/src/protocol.rs index eaed7ffcccace7b0936cd974c48b22d78e837ecc..0838657fae530d1ddc82aa580fe4fc09dd166b6b 100644 --- a/substrate/client/network/src/protocol.rs +++ b/substrate/client/network/src/protocol.rs @@ -21,49 +21,64 @@ use crate::{ config::{self, ProtocolId}, error, request_responses::RequestFailure, - utils::{interval, LruHashSet}, schema::v1::StateResponse, + utils::{interval, LruHashSet}, }; use bytes::Bytes; use codec::{Decode, DecodeAll, Encode}; use futures::{channel::oneshot, prelude::*}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, + }, + request_response::OutboundFailure, + swarm::{ + IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, + ProtocolsHandler, + }, + Multiaddr, PeerId, +}; +use log::{debug, error, log, trace, warn, Level}; +use message::{ + generic::{Message as GenericMessage, Roles}, + BlockAnnounce, Message, +}; use notifications::{Notifications, NotificationsOut}; -use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; -use libp2p::request_response::OutboundFailure; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; -use libp2p::{Multiaddr, PeerId}; -use log::{log, Level, trace, debug, warn, error}; -use message::{BlockAnnounce, Message}; -use message::generic::{Message as GenericMessage, Roles}; -use prometheus_endpoint::{Registry, Gauge, GaugeVec, PrometheusError, Opts, register, U64}; +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use prost::Message as _; +use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::{ - BlockOrigin, block_validation::BlockAnnounceValidator, - import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} + import_queue::{BlockImportError, BlockImportResult, IncomingBlock, Origin}, + 
BlockOrigin, }; use sp_runtime::{ - Justifications, generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub}, + traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, + Justifications, +}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet, VecDeque}, + convert::TryFrom as _, + io, iter, + num::NonZeroUsize, + pin::Pin, + sync::Arc, + task::Poll, + time, }; -use sp_arithmetic::traits::SaturatedConversion; use sync::{ChainSync, Status as SyncStatus}; -use std::borrow::Cow; -use std::convert::TryFrom as _; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::sync::Arc; -use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; mod notifications; -pub mod message; pub mod event; +pub mod message; pub mod sync; -pub use notifications::{NotificationsSink, Ready, NotifsHandlerError}; +pub use notifications::{NotificationsSink, NotifsHandlerError, Ready}; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); @@ -134,7 +149,7 @@ impl Metrics { let g = GaugeVec::new( Opts::new( "sync_extra_justifications", - "Number of extra justifications requests" + "Number of extra justifications requests", ), &["status"], )?; @@ -191,10 +206,7 @@ enum PeerRequest { struct Peer { info: PeerInfo, /// Current request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. - request: Option<( - PeerRequest, - oneshot::Receiver, RequestFailure>>, - )>, + request: Option<(PeerRequest, oneshot::Receiver, RequestFailure>>)>, /// Holds a set of blocks known to this peer. known_blocks: LruHashSet, } @@ -228,13 +240,8 @@ impl ProtocolConfig { } else { match self.sync_mode { config::SyncMode::Full => sync::SyncMode::Full, - config::SyncMode::Fast { - skip_proofs, - storage_chain_mode, - } => sync::SyncMode::LightState { - skip_proofs, - storage_chain_mode - }, + config::SyncMode::Fast { skip_proofs, storage_chain_mode } => + sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, } } } @@ -296,7 +303,8 @@ impl Protocol { chain.clone(), block_announce_validator, config.max_parallel_downloads, - ).map_err(Box::new)?; + ) + .map_err(Box::new)?; let boot_node_ids = { let mut list = HashSet::new(); @@ -312,7 +320,11 @@ impl Protocol { for reserved in &network_config.default_peers_set.reserved_nodes { imp_p.insert(reserved.peer_id.clone()); } - for reserved in network_config.extra_sets.iter().flat_map(|s| s.set_config.reserved_nodes.iter()) { + for reserved in network_config + .extra_sets + .iter() + .flat_map(|s| s.set_config.reserved_nodes.iter()) + { imp_p.insert(reserved.peer_id.clone()); } imp_p.shrink_to_fit(); @@ -322,7 +334,8 @@ impl Protocol { let mut known_addresses = Vec::new(); let (peerset, peerset_handle) = { - let mut sets = Vec::with_capacity(NUM_HARDCODED_PEERSETS + network_config.extra_sets.len()); + let mut sets = + Vec::with_capacity(NUM_HARDCODED_PEERSETS + network_config.extra_sets.len()); let mut default_sets_reserved = HashSet::new(); for reserved in network_config.default_peers_set.reserved_nodes.iter() { @@ -342,8 +355,8 @@ impl Protocol { out_peers: network_config.default_peers_set.out_peers, bootnodes, reserved_nodes: default_sets_reserved.clone(), - reserved_only: network_config.default_peers_set.non_reserved_mode - == config::NonReservedPeerMode::Deny, + reserved_only: network_config.default_peers_set.non_reserved_mode == + config::NonReservedPeerMode::Deny, }); for set_cfg in &network_config.extra_sets { @@ -365,9 +378,7 
@@ impl Protocol { }); } - sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { - sets, - }) + sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets }) }; let block_announces_protocol: Cow<'static, str> = Cow::from({ @@ -383,12 +394,9 @@ impl Protocol { let best_hash = info.best_hash; let genesis_hash = info.genesis_hash; - let block_announces_handshake = BlockAnnouncesHandshake::::build( - &config, - best_number, - best_hash, - genesis_hash, - ).encode(); + let block_announces_handshake = + BlockAnnouncesHandshake::::build(&config, best_number, best_hash, genesis_hash) + .encode(); let sync_protocol_config = notifications::ProtocolConfig { name: block_announces_protocol, @@ -399,22 +407,22 @@ impl Protocol { Notifications::new( peerset, - iter::once(sync_protocol_config) - .chain(network_config.extra_sets.iter() - .zip(notifications_protocols_handshakes) - .map(|(s, hs)| notifications::ProtocolConfig { + iter::once(sync_protocol_config).chain( + network_config.extra_sets.iter().zip(notifications_protocols_handshakes).map( + |(s, hs)| notifications::ProtocolConfig { name: s.notifications_protocol.clone(), fallback_names: s.fallback_names.clone(), handshake: hs, max_notification_size: s.max_notification_size, - }) + }, ), + ), ) }; let block_announce_data_cache = lru::LruCache::new( - network_config.default_peers_set.in_peers as usize - + network_config.default_peers_set.out_peers as usize, + network_config.default_peers_set.in_peers as usize + + network_config.default_peers_set.out_peers as usize, ); let protocol = Protocol { @@ -428,8 +436,11 @@ impl Protocol { important_peers, peerset_handle: peerset_handle.clone(), behaviour, - notification_protocols: - network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone()).collect(), + notification_protocols: network_config + .extra_sets + .iter() + .map(|s| s.notifications_protocol.clone()) + .collect(), bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) @@ -461,8 +472,12 @@ impl Protocol { /// Disconnects the given peer if we are connected to it. pub fn disconnect_peer(&mut self, peer_id: &PeerId, protocol_name: &str) { - if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) { - self.behaviour.disconnect_peer(peer_id, sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS)); + if let Some(position) = self.notification_protocols.iter().position(|p| *p == protocol_name) + { + self.behaviour.disconnect_peer( + peer_id, + sc_peerset::SetId::from(position + NUM_HARDCODED_PEERSETS), + ); } else { log::warn!(target: "sub-libp2p", "disconnect_peer() with invalid protocol name") } @@ -480,10 +495,7 @@ impl Protocol { /// Returns the number of peers we're connected to and that are being queried. pub fn num_active_peers(&self) -> usize { - self.peers - .values() - .filter(|p| p.request.is_some()) - .count() + self.peers.values().filter(|p| p.request.is_some()).count() } /// Current global sync state. 
@@ -524,12 +536,8 @@ impl Protocol { self.behaviour.set_notif_protocol_handshake( HARDCODED_PEERSETS_SYNC, - BlockAnnouncesHandshake::::build( - &self.config, - number, - hash, - self.genesis_hash, - ).encode() + BlockAnnouncesHandshake::::build(&self.config, number, hash, self.genesis_hash) + .encode(), ); } @@ -566,8 +574,11 @@ impl Protocol { } if let Some(_peer_data) = self.peers.remove(&peer) { - if let Some(sync::OnBlockData::Import(origin, blocks)) = self.sync.peer_disconnected(&peer) { - self.pending_messages.push_back(CustomMessageOutcome::BlockImport(origin, blocks)); + if let Some(sync::OnBlockData::Import(origin, blocks)) = + self.sync.peer_disconnected(&peer) + { + self.pending_messages + .push_back(CustomMessageOutcome::BlockImport(origin, blocks)); } Ok(()) } else { @@ -588,67 +599,76 @@ impl Protocol { request: message::BlockRequest, response: crate::schema::v1::BlockResponse, ) -> CustomMessageOutcome { - let blocks = response.blocks.into_iter().map(|block_data| { - Ok(message::BlockData:: { - hash: Decode::decode(&mut block_data.hash.as_ref())?, - header: if !block_data.header.is_empty() { - Some(Decode::decode(&mut block_data.header.as_ref())?) - } else { - None - }, - body: if request.fields.contains(message::BlockAttributes::BODY) { - Some(block_data.body.iter().map(|body| { - Decode::decode(&mut body.as_ref()) - }).collect::, _>>()?) - } else { - None - }, - indexed_body: if request.fields.contains(message::BlockAttributes::INDEXED_BODY) { - Some(block_data.indexed_body) - } else { - None - }, - receipt: if !block_data.message_queue.is_empty() { - Some(block_data.receipt) - } else { - None - }, - message_queue: if !block_data.message_queue.is_empty() { - Some(block_data.message_queue) - } else { - None - }, - justification: if !block_data.justification.is_empty() { - Some(block_data.justification) - } else if block_data.is_empty_justification { - Some(Vec::new()) - } else { - None - }, - justifications: if !block_data.justifications.is_empty() { - Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) - } else { - None - }, + let blocks = response + .blocks + .into_iter() + .map(|block_data| { + Ok(message::BlockData:: { + hash: Decode::decode(&mut block_data.hash.as_ref())?, + header: if !block_data.header.is_empty() { + Some(Decode::decode(&mut block_data.header.as_ref())?) + } else { + None + }, + body: if request.fields.contains(message::BlockAttributes::BODY) { + Some( + block_data + .body + .iter() + .map(|body| Decode::decode(&mut body.as_ref())) + .collect::, _>>()?, + ) + } else { + None + }, + indexed_body: if request.fields.contains(message::BlockAttributes::INDEXED_BODY) + { + Some(block_data.indexed_body) + } else { + None + }, + receipt: if !block_data.message_queue.is_empty() { + Some(block_data.receipt) + } else { + None + }, + message_queue: if !block_data.message_queue.is_empty() { + Some(block_data.message_queue) + } else { + None + }, + justification: if !block_data.justification.is_empty() { + Some(block_data.justification) + } else if block_data.is_empty_justification { + Some(Vec::new()) + } else { + None + }, + justifications: if !block_data.justifications.is_empty() { + Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) 
+ } else { + None + }, + }) }) - }).collect::<Result<Vec<_>, codec::Error>>(); + .collect::<Result<Vec<_>, codec::Error>>(); let blocks = match blocks { Ok(blocks) => blocks, Err(err) => { debug!(target: "sync", "Failed to decode block response from {}: {}", peer_id, err); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - } + return CustomMessageOutcome::None + }, }; - let block_response = message::BlockResponse::<B> { - id: request.id, - blocks, - }; + let block_response = message::BlockResponse::<B> { id: request.id, blocks }; let blocks_range = || match ( - block_response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), + block_response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), block_response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), ) { (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), @@ -671,20 +691,18 @@ impl<B: BlockT> Protocol<B> { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } else { match self.sync.on_block_data(&peer_id, Some(request), block_response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), - Ok(sync::OnBlockData::Request(peer, req)) => { - self.prepare_block_request(peer, req) - } + Ok(sync::OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } } @@ -699,14 +717,13 @@ impl<B: BlockT> Protocol<B> { match self.sync.on_state_data(&peer_id, response) { Ok(sync::OnStateData::Import(origin, block)) => CustomMessageOutcome::BlockImport(origin, vec![block]), - Ok(sync::OnStateData::Request(peer, req)) => { - prepare_state_request::<B>(&mut self.peers, peer, req) - } + Ok(sync::OnStateData::Request(peer, req)) => + prepare_state_request::<B>(&mut self.peers, peer, req), Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } @@ -732,7 +749,7 @@ impl<B: BlockT> Protocol<B> { if self.peers.contains_key(&who) { log::error!(target: "sync", "Called on_sync_peer_connected with already connected peer {}", who); debug_assert!(false); - return Err(()); + return Err(()) } if status.genesis_hash != self.genesis_hash { @@ -755,7 +772,7 @@ ); } - return Err(()); + return Err(()) } if self.config.roles.is_light() { @@ -764,14 +781,11 @@ debug!(target: "sync", "Peer {} is unable to serve light requests", who); self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } // we are not interested in peers that are far behind us - let self_best_block = self - .chain - .info() - .best_number; + let self_best_block = self.chain.info().best_number; let blocks_difference = self_best_block .checked_sub(&status.best_number) .unwrap_or_else(Zero::zero) @@ -780,7 +794,7 @@ debug!(target: "sync", "Peer {} is far behind us and will be unable to serve light requests", who); self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - return Err(()); + return Err(()) } } @@ -788,11 +802,12 @@ impl<B: BlockT> Protocol<B> { info: PeerInfo { roles: status.roles, 
best_hash: status.best_hash, - best_number: status.best_number + best_number: status.best_number, }, request: None, - known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) - .expect("Constant is nonzero")), + known_blocks: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), + ), }; let req = if peer.info.roles.is_full() { @@ -802,7 +817,7 @@ impl Protocol { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); return Err(()) - } + }, } } else { None @@ -811,7 +826,8 @@ impl Protocol { debug!(target: "sync", "Connected {}", who); self.peers.insert(who.clone(), peer); - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); + self.pending_messages + .push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); if let Some(req) = req { let event = self.prepare_block_request(who.clone(), req); @@ -830,23 +846,25 @@ impl Protocol { Ok(Some(header)) => header, Ok(None) => { warn!("Trying to announce unknown block: {}", hash); - return; - } + return + }, Err(e) => { warn!("Error reading block header {}: {:?}", hash, e); - return; - } + return + }, }; // don't announce genesis block since it will be ignored if header.number().is_zero() { - return; + return } let is_best = self.chain.info().best_hash == hash; debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); - let data = data.or_else(|| self.block_announce_data_cache.get(&hash).cloned()).unwrap_or_default(); + let data = data + .or_else(|| self.block_announce_data_cache.get(&hash).cloned()) + .unwrap_or_default(); for (who, ref mut peer) in self.peers.iter_mut() { let inserted = peer.known_blocks.insert(hash); @@ -862,11 +880,8 @@ impl Protocol { data: Some(data.clone()), }; - self.behaviour.write_notification( - who, - HARDCODED_PEERSETS_SYNC, - message.encode() - ); + self.behaviour + .write_notification(who, HARDCODED_PEERSETS_SYNC, message.encode()); } } } @@ -884,11 +899,7 @@ impl Protocol { /// in the task before being polled once. So, it is required to call /// [`ChainSync::poll_block_announce_validation`] to ensure that the future is /// registered properly and will wake up the task when being ready. 
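// [Editor's aside -- illustrative sketch, not part of this diff.] The doc
// comment above describes a real pitfall of `FuturesUnordered`-style
// collections: a future pushed into one is not registered with any task
// until the collection itself is polled, which is why
// `ChainSync::poll_block_announce_validation` must be called after pushing.
// A minimal demonstration using only the `futures` crate (names here are
// illustrative, not Substrate APIs):
//
// use futures::{stream::FuturesUnordered, StreamExt};
//
// async fn demo() {
//     let mut pending = FuturesUnordered::new();
//
//     // Pushing alone does NOT register the future with the executor;
//     // nothing will wake the task on its behalf until the stream is
//     // polled at least once.
//     pending.push(async { 42u32 });
//
//     // Polling the stream (here via `next().await`) registers the inner
//     // future with the current task and drives it to completion.
//     if let Some(value) = pending.next().await {
//         assert_eq!(value, 42);
//     }
// }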
- fn push_block_announce_validation( - &mut self, - who: PeerId, - announce: BlockAnnounce, - ) { + fn push_block_announce_validation(&mut self, who: PeerId, announce: BlockAnnounce) { let hash = announce.header.hash(); let peer = match self.peers.get_mut(&who) { @@ -896,8 +907,8 @@ impl Protocol { None => { log::error!(target: "sync", "Received block announce from disconnected peer {}", who); debug_assert!(false); - return; - } + return + }, }; peer.known_blocks.insert(hash.clone()); @@ -918,8 +929,7 @@ impl Protocol { validation_result: sync::PollBlockAnnounceValidation, ) -> CustomMessageOutcome { let (header, is_best, who) = match validation_result { - sync::PollBlockAnnounceValidation::Skip => - return CustomMessageOutcome::None, + sync::PollBlockAnnounceValidation::Skip => return CustomMessageOutcome::None, sync::PollBlockAnnounceValidation::Nothing { is_best, who, announce } => { self.update_peer_info(&who); @@ -940,7 +950,7 @@ impl Protocol { } else { return CustomMessageOutcome::None } - } + }, sync::PollBlockAnnounceValidation::ImportHeader { announce, is_best, who } => { self.update_peer_info(&who); @@ -951,7 +961,7 @@ impl Protocol { } (announce.header, is_best, who) - } + }, sync::PollBlockAnnounceValidation::Failure { who, disconnect } => { if disconnect { self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); @@ -959,7 +969,7 @@ impl Protocol { self.report_peer(who, rep::BAD_BLOCK_ANNOUNCEMENT); return CustomMessageOutcome::None - } + }, }; let number = *header.number(); @@ -971,39 +981,32 @@ impl Protocol { None, message::generic::BlockResponse { id: 0, - blocks: vec![ - message::generic::BlockData { - hash: header.hash(), - header: Some(header), - body: None, - indexed_body: None, - receipt: None, - message_queue: None, - justification: None, - justifications: None, - }, - ], + blocks: vec![message::generic::BlockData { + hash: header.hash(), + header: Some(header), + body: None, + indexed_body: None, + receipt: None, + message_queue: None, + justification: None, + justifications: None, + }], }, ); if is_best { - self.pending_messages.push_back( - CustomMessageOutcome::PeerNewBest(who, number), - ); + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); } match blocks_to_import { - Ok(sync::OnBlockData::Import(origin, blocks)) => { - CustomMessageOutcome::BlockImport(origin, blocks) - }, - Ok(sync::OnBlockData::Request(peer, req)) => { - self.prepare_block_request(peer, req) - } + Ok(sync::OnBlockData::Import(origin, blocks)) => + CustomMessageOutcome::BlockImport(origin, blocks), + Ok(sync::OnBlockData::Request(peer, req)) => self.prepare_block_request(peer, req), Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); CustomMessageOutcome::None - } + }, } } @@ -1029,7 +1032,12 @@ impl Protocol { /// Request syncing for the given block from given set of peers. /// Uses `protocol` to queue a new block download request and tries to dispatch all pending /// requests. 
- pub fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { + pub fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { self.sync.set_sync_fork_request(peers, hash, number) } @@ -1040,39 +1048,41 @@ impl Protocol { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - let results = self.sync.on_blocks_processed( - imported, - count, - results, - ); + let results = self.sync.on_blocks_processed(imported, count, results); for result in results { match result { Ok((id, req)) => { - self.pending_messages.push_back( - prepare_block_request(&mut self.peers, id, req) - ); - } + self.pending_messages.push_back(prepare_block_request( + &mut self.peers, + id, + req, + )); + }, Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu) - } + }, } } } /// Call this when a justification has been processed by the import queue, with or without /// errors. - pub fn justification_import_result(&mut self, who: PeerId, hash: B::Hash, number: NumberFor, success: bool) { + pub fn justification_import_result( + &mut self, + who: PeerId, + hash: B::Hash, + number: NumberFor, + success: bool, + ) { self.sync.on_justification_import(hash, number, success); if !success { log::info!("💔 Invalid justification provided by {} for #{}", who, hash); self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC); - self.peerset_handle.report_peer( - who, - sc_peerset::ReputationChange::new_fatal("Invalid justification") - ); + self.peerset_handle + .report_peer(who, sc_peerset::ReputationChange::new_fatal("Invalid justification")); } } @@ -1104,7 +1114,10 @@ impl Protocol { /// Removes a `PeerId` from the list of reserved peers. pub fn remove_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.remove_reserved_peer( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); } else { log::error!( target: "sub-libp2p", @@ -1117,7 +1130,8 @@ impl Protocol { /// Adds a `PeerId` to the list of reserved peers. pub fn add_set_reserved_peer(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle + .add_reserved_peer(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); } else { log::error!( target: "sub-libp2p", @@ -1139,7 +1153,8 @@ impl Protocol { /// Add a peer to a peers set. pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle + .add_to_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); } else { log::error!( target: "sub-libp2p", @@ -1152,7 +1167,10 @@ impl Protocol { /// Remove a peer from a peers set. 
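// [Editor's aside -- illustrative sketch, not part of this diff.] The
// reserved-peer and peers-set methods in the hunks above all repeat one
// translation: user-facing notification protocols are stored after the
// hardcoded sets, so a protocol's position in `notification_protocols` is
// offset by `NUM_HARDCODED_PEERSETS` to obtain its `sc_peerset::SetId`.
// A simplified mirror of that lookup with std types standing in for
// `SetId` (the constant's value is assumed for the sketch):
//
// const NUM_HARDCODED_PEERSETS: usize = 1; // assumed: the sync set occupies slot 0
//
// /// Translate a protocol name into its peerset index, skipping hardcoded sets.
// fn set_id_for(notification_protocols: &[&str], protocol: &str) -> Option<usize> {
//     notification_protocols
//         .iter()
//         .position(|p| *p == protocol)
//         .map(|index| index + NUM_HARDCODED_PEERSETS)
// }
//
// fn main() {
//     let protocols = ["/chain/transactions/1", "/chain/grandpa/1"];
//     // "/chain/grandpa/1" sits at position 1, so its set id is 1 + 1 = 2.
//     assert_eq!(set_id_for(&protocols, "/chain/grandpa/1"), Some(2));
//     // Unknown protocols yield None; the methods above log an error then.
//     assert_eq!(set_id_for(&protocols, "/unknown/1"), None);
// }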
pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peer: PeerId) { if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { - self.peerset_handle.remove_from_peers_set(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peer); + self.peerset_handle.remove_from_peers_set( + sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), + peer, + ); } else { log::error!( target: "sub-libp2p", @@ -1172,13 +1190,21 @@ impl Protocol { metrics.fork_targets.set(m.fork_targets.into()); metrics.queued_blocks.set(m.queued_blocks.into()); - metrics.justifications.with_label_values(&["pending"]) + metrics + .justifications + .with_label_values(&["pending"]) .set(m.justifications.pending_requests.into()); - metrics.justifications.with_label_values(&["active"]) + metrics + .justifications + .with_label_values(&["active"]) .set(m.justifications.active_requests.into()); - metrics.justifications.with_label_values(&["failed"]) + metrics + .justifications + .with_label_values(&["failed"]) .set(m.justifications.failed_requests.into()); - metrics.justifications.with_label_values(&["importing"]) + metrics + .justifications + .with_label_values(&["importing"]) .set(m.justifications.importing_requests.into()); } } @@ -1209,11 +1235,7 @@ fn prepare_block_request( support_multiple_justifications: true, }; - CustomMessageOutcome::BlockRequest { - target: who, - request: request, - pending_response: tx, - } + CustomMessageOutcome::BlockRequest { target: who, request, pending_response: tx } } fn prepare_state_request( @@ -1226,11 +1248,7 @@ fn prepare_state_request( if let Some(ref mut peer) = peers.get_mut(&who) { peer.request = Some((PeerRequest::State, rx)); } - CustomMessageOutcome::StateRequest { - target: who, - request: request, - pending_response: tx, - } + CustomMessageOutcome::StateRequest { target: who, request, pending_response: tx } } /// Outcome of an incoming custom message. @@ -1246,7 +1264,7 @@ pub enum CustomMessageOutcome { /// See [`crate::Event::NotificationStreamOpened::negotiated_fallback`]. negotiated_fallback: Option>, roles: Roles, - notifications_sink: NotificationsSink + notifications_sink: NotificationsSink, }, /// The [`NotificationsSink`] of some notification protocols need an update. NotificationStreamReplaced { @@ -1255,9 +1273,15 @@ pub enum CustomMessageOutcome { notifications_sink: NotificationsSink, }, /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocol: Cow<'static, str> }, + NotificationStreamClosed { + remote: PeerId, + protocol: Cow<'static, str>, + }, /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(Cow<'static, str>, Bytes)> }, + NotificationsReceived { + remote: PeerId, + messages: Vec<(Cow<'static, str>, Bytes)>, + }, /// A new block request must be emitted. 
BlockRequest { target: PeerId, @@ -1291,11 +1315,21 @@ impl NetworkBehaviour for Protocol { self.behaviour.addresses_of_peer(peer_id) } - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.behaviour.inject_connection_established(peer_id, conn, endpoint) } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { self.behaviour.inject_connection_closed(peer_id, conn, endpoint) } @@ -1325,9 +1359,9 @@ impl NetworkBehaviour for Protocol { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { + >{ if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } // Check for finished outgoing requests. @@ -1340,38 +1374,44 @@ impl NetworkBehaviour for Protocol { let (req, _) = peer.request.take().unwrap(); match req { PeerRequest::Block(req) => { - let protobuf_response = match crate::schema::v1::BlockResponse::decode(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!( - target: "sync", - "Failed to decode block response from peer {:?}: {:?}.", - id, - e - ); - self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); - self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - continue; - } - }; + let protobuf_response = + match crate::schema::v1::BlockResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode block response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle + .report_peer(id.clone(), rep::BAD_MESSAGE); + self.behaviour + .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue + }, + }; finished_block_requests.push((id.clone(), req, protobuf_response)); }, PeerRequest::State => { - let protobuf_response = match crate::schema::v1::StateResponse::decode(&resp[..]) { - Ok(proto) => proto, - Err(e) => { - debug!( - target: "sync", - "Failed to decode state response from peer {:?}: {:?}.", - id, - e - ); - self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); - self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - continue; - } - }; + let protobuf_response = + match crate::schema::v1::StateResponse::decode(&resp[..]) { + Ok(proto) => proto, + Err(e) => { + debug!( + target: "sync", + "Failed to decode state response from peer {:?}: {:?}.", + id, + e + ); + self.peerset_handle + .report_peer(id.clone(), rep::BAD_MESSAGE); + self.behaviour + .disconnect_peer(id, HARDCODED_PEERSETS_SYNC); + continue + }, + }; finished_state_requests.push((id.clone(), protobuf_response)); }, @@ -1385,32 +1425,35 @@ impl NetworkBehaviour for Protocol { RequestFailure::Network(OutboundFailure::Timeout) => { self.peerset_handle.report_peer(id.clone(), rep::TIMEOUT); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - } + }, RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { self.peerset_handle.report_peer(id.clone(), rep::BAD_PROTOCOL); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - } + }, RequestFailure::Network(OutboundFailure::DialFailure) => { self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - } + }, RequestFailure::Refused => { 
self.peerset_handle.report_peer(id.clone(), rep::REFUSED); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); - } - RequestFailure::Network(OutboundFailure::ConnectionClosed) - | RequestFailure::NotConnected => { + }, + RequestFailure::Network(OutboundFailure::ConnectionClosed) | + RequestFailure::NotConnected => { self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); }, RequestFailure::UnknownProtocol => { - debug_assert!(false, "Block request protocol should always be known."); - } + debug_assert!( + false, + "Block request protocol should always be known." + ); + }, RequestFailure::Obsolete => { debug_assert!( false, "Can not receive `RequestFailure::Obsolete` after dropping the \ response receiver.", ); - } + }, } }, Poll::Ready(Err(oneshot::Canceled)) => { @@ -1461,7 +1504,7 @@ impl NetworkBehaviour for Protocol { } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } let event = match self.behaviour.poll(cx, params) { @@ -1472,14 +1515,22 @@ impl NetworkBehaviour for Protocol { Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }) => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address, score }), }; let outcome = match event { NotificationsOut::CustomProtocolOpen { - peer_id, set_id, received_handshake, notifications_sink, negotiated_fallback + peer_id, + set_id, + received_handshake, + notifications_sink, + negotiated_fallback, } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { @@ -1512,16 +1563,21 @@ impl NetworkBehaviour for Protocol { ); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None - } + }, Err(err) => { - match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + match as DecodeAll>::decode_all( + &mut &received_handshake[..], + ) { Ok(handshake) => { - if self.on_sync_peer_connected(peer_id.clone(), handshake).is_ok() { + if self + .on_sync_peer_connected(peer_id.clone(), handshake) + .is_ok() + { CustomMessageOutcome::SyncConnected(peer_id) } else { CustomMessageOutcome::None } - } + }, Err(err2) => { debug!( target: "sync", @@ -1533,21 +1589,24 @@ impl NetworkBehaviour for Protocol { ); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None - } + }, } - } + }, } - } else { - match (message::Roles::decode_all(&received_handshake[..]), self.peers.get(&peer_id)) { - (Ok(roles), _) => - CustomMessageOutcome::NotificationStreamOpened { - remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), - negotiated_fallback, - roles, - notifications_sink, - }, + match ( + message::Roles::decode_all(&received_handshake[..]), + self.peers.get(&peer_id), + ) { + (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), + negotiated_fallback, + roles, + notifications_sink, + }, (Err(_), Some(peer)) if received_handshake.is_empty() => { // As a convenience, we allow opening substreams for "external" // notification protocols with an empty handshake. This fetches the @@ -1555,7 +1614,9 @@ impl NetworkBehaviour for Protocol { // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), negotiated_fallback, roles: peer.info.roles, notifications_sink, @@ -1567,11 +1628,11 @@ impl NetworkBehaviour for Protocol { self.behaviour.disconnect_peer(&peer_id, set_id); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None - } + }, } } - } - NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { + }, + NotificationsOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => if set_id == HARDCODED_PEERSETS_SYNC { CustomMessageOutcome::None } else if self.bad_handshake_substreams.contains(&(peer_id.clone(), set_id)) { @@ -1579,11 +1640,12 @@ impl NetworkBehaviour for Protocol { } else { CustomMessageOutcome::NotificationStreamReplaced { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), notifications_sink, } - } - }, + }, NotificationsOut::CustomProtocolClosed { peer_id, set_id } => { // Set number 0 is hardcoded the default set of peers we sync from. 
if set_id == HARDCODED_PEERSETS_SYNC { @@ -1605,55 +1667,57 @@ impl NetworkBehaviour for Protocol { } else { CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, - protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + protocol: self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(), } } }, - NotificationsOut::Notification { peer_id, set_id, message } => - match set_id { - HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { - if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { - self.push_block_announce_validation(peer_id, announce); - - // Make sure that the newly added block announce validation future was - // polled once to be registered in the task. - if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { - self.process_block_announce_validation_result(res) - } else { - CustomMessageOutcome::None - } + NotificationsOut::Notification { peer_id, set_id, message } => match set_id { + HARDCODED_PEERSETS_SYNC if self.peers.contains_key(&peer_id) => { + if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { + self.push_block_announce_validation(peer_id, announce); + + // Make sure that the newly added block announce validation future was + // polled once to be registered in the task. + if let Poll::Ready(res) = self.sync.poll_block_announce_validation(cx) { + self.process_block_announce_validation_result(res) } else { - warn!(target: "sub-libp2p", "Failed to decode block announce"); CustomMessageOutcome::None } - } - HARDCODED_PEERSETS_SYNC => { - trace!( - target: "sync", - "Received sync for peer earlier refused by sync layer: {}", - peer_id - ); - CustomMessageOutcome::None - } - _ if self.bad_handshake_substreams.contains(&(peer_id.clone(), set_id)) => { + } else { + warn!(target: "sub-libp2p", "Failed to decode block announce"); CustomMessageOutcome::None } - _ => { - let protocol_name = self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(); - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(protocol_name, message.freeze())], - } + }, + HARDCODED_PEERSETS_SYNC => { + trace!( + target: "sync", + "Received sync for peer earlier refused by sync layer: {}", + peer_id + ); + CustomMessageOutcome::None + }, + _ if self.bad_handshake_substreams.contains(&(peer_id.clone(), set_id)) => + CustomMessageOutcome::None, + _ => { + let protocol_name = self.notification_protocols + [usize::from(set_id) - NUM_HARDCODED_PEERSETS] + .clone(); + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(protocol_name, message.freeze())], } - } + }, + }, }; if !matches!(outcome, CustomMessageOutcome::::None) { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) } if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)) } // This block can only be reached if an event was pulled from the behaviour and that @@ -1667,7 +1731,7 @@ impl NetworkBehaviour for Protocol { &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { self.behaviour.inject_addr_reach_failure(peer_id, addr, error) } diff --git a/substrate/client/network/src/protocol/event.rs 
b/substrate/client/network/src/protocol/event.rs index c13980b3f43026836a90d781dbc24bd3143e224c..df56f426ad1fe18ae9fb7ed4880243d62180cab8 100644 --- a/substrate/client/network/src/protocol/event.rs +++ b/substrate/client/network/src/protocol/event.rs @@ -20,8 +20,7 @@ //! events that happen on the network like DHT get/put results received. use bytes::Bytes; -use libp2p::core::PeerId; -use libp2p::kad::record::Key; +use libp2p::{core::PeerId, kad::record::Key}; use std::borrow::Cow; /// Events generated by DHT as a response to get_value and put_value requests. diff --git a/substrate/client/network/src/protocol/message.rs b/substrate/client/network/src/protocol/message.rs index 50d0fd7969021f46b1f4add2a2dfd0edbce4e356..95f5ffa3a545aa3400fcee88b6c221ad22974691 100644 --- a/substrate/client/network/src/protocol/message.rs +++ b/substrate/client/network/src/protocol/message.rs @@ -18,16 +18,17 @@ //! Network packet message types. These get serialized and put into the lower level protocol payload. -use bitflags::bitflags; -use sp_runtime::{ConsensusEngineId, traits::{Block as BlockT, Header as HeaderT}}; -use codec::{Encode, Decode, Input, Output, Error}; pub use self::generic::{ - BlockAnnounce, RemoteCallRequest, RemoteReadRequest, - RemoteHeaderRequest, RemoteHeaderResponse, - RemoteChangesRequest, RemoteChangesResponse, - FromBlock, RemoteReadChildRequest, Roles, + BlockAnnounce, FromBlock, RemoteCallRequest, RemoteChangesRequest, RemoteChangesResponse, + RemoteHeaderRequest, RemoteHeaderResponse, RemoteReadChildRequest, RemoteReadRequest, Roles, }; +use bitflags::bitflags; +use codec::{Decode, Encode, Error, Input, Output}; use sc_client_api::StorageProof; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT}, + ConsensusEngineId, +}; /// A unique ID of a request. pub type RequestId = u64; @@ -41,24 +42,16 @@ pub type Message = generic::Message< >; /// Type alias for using the block request type using block type parameters. -pub type BlockRequest = generic::BlockRequest< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type BlockRequest = + generic::BlockRequest<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the BlockData type using block type parameters. -pub type BlockData = generic::BlockData< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockData = + generic::BlockData<::Header, ::Hash, ::Extrinsic>; /// Type alias for using the BlockResponse type using block type parameters. -pub type BlockResponse = generic::BlockResponse< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockResponse = + generic::BlockResponse<::Header, ::Hash, ::Extrinsic>; /// A set of transactions. pub type Transactions = Vec; @@ -168,14 +161,13 @@ impl generic::BlockAnnounce { /// Generic types. pub mod generic { - use bitflags::bitflags; - use codec::{Encode, Decode, Input, Output}; - use sp_runtime::{EncodedJustification, Justifications}; use super::{ - RemoteReadResponse, Transactions, Direction, - RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, - BlockState, StorageProof, + BlockAttributes, BlockState, ConsensusEngineId, Direction, RemoteCallResponse, + RemoteReadResponse, RequestId, StorageProof, Transactions, }; + use bitflags::bitflags; + use codec::{Decode, Encode, Input, Output}; + use sp_runtime::{EncodedJustification, Justifications}; bitflags! { /// Bitmask of the roles that a node fulfills. 
@@ -358,11 +350,12 @@ pub mod generic { let compact = CompactStatus::decode(value)?; let chain_status = match >::decode(value) { Ok(v) => v, - Err(e) => if compact.version <= LAST_CHAIN_STATUS_VERSION { - return Err(e) - } else { - Vec::new() - } + Err(e) => + if compact.version <= LAST_CHAIN_STATUS_VERSION { + return Err(e) + } else { + Vec::new() + }, }; let CompactStatus { @@ -443,11 +436,7 @@ pub mod generic { let header = H::decode(input)?; let state = BlockState::decode(input).ok(); let data = Vec::decode(input).ok(); - Ok(BlockAnnounce { - header, - state, - data, - }) + Ok(BlockAnnounce { header, state, data }) } } diff --git a/substrate/client/network/src/protocol/notifications.rs b/substrate/client/network/src/protocol/notifications.rs index 8739eb4948b77d56273913ea1c8c6891c7782039..e489970e987c65ebae84352589f44d157b6a51d9 100644 --- a/substrate/client/network/src/protocol/notifications.rs +++ b/substrate/client/network/src/protocol/notifications.rs @@ -19,10 +19,12 @@ //! Implementation of libp2p's `NetworkBehaviour` trait that establishes communications and opens //! notifications substreams. -pub use self::behaviour::{Notifications, NotificationsOut, ProtocolConfig}; -pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready}; +pub use self::{ + behaviour::{Notifications, NotificationsOut, ProtocolConfig}, + handler::{NotificationsSink, NotifsHandlerError, Ready}, +}; mod behaviour; mod handler; -mod upgrade; mod tests; +mod upgrade; diff --git a/substrate/client/network/src/protocol/notifications/behaviour.rs b/substrate/client/network/src/protocol/notifications/behaviour.rs index f95f6870e5fae23c347a562946141ae23e3c7f09..1466e9d4264d969b01196be9c3e8c712510c0f1f 100644 --- a/substrate/client/network/src/protocol/notifications/behaviour.rs +++ b/substrate/client/network/src/protocol/notifications/behaviour.rs @@ -16,28 +16,34 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::protocol::notifications::{ - handler::{self, NotificationsSink, NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn} +use crate::protocol::notifications::handler::{ + self, NotificationsSink, NotifsHandlerIn, NotifsHandlerOut, NotifsHandlerProto, }; use bytes::BytesMut; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; -use libp2p::swarm::{ - DialPeerCondition, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - PollParameters +use libp2p::{ + core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, + swarm::{ + DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, + }, }; use log::{error, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; -use std::task::{Context, Poll}; -use std::{borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}}; -use std::{error, mem, pin::Pin, str, sync::Arc, time::Duration}; +use std::{ + borrow::Cow, + cmp, + collections::{hash_map::Entry, VecDeque}, + error, mem, + pin::Pin, + str, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other peers. @@ -93,7 +99,6 @@ use wasm_timer::Instant; /// the time of this writing, there may be at most two connections to a peer /// and only as a result of simultaneous dialing. 
However, the implementation /// accommodates for any number of connections. -/// pub struct Notifications { /// Notification protocols. Entries never change after initialization. notif_protocols: Vec, @@ -111,7 +116,9 @@ pub struct Notifications { /// /// By design, we never remove elements from this list. Elements are removed only when the /// `Delay` triggers. As such, this stream may produce obsolete elements. - delays: stream::FuturesUnordered + Send>>>, + delays: stream::FuturesUnordered< + Pin + Send>>, + >, /// [`DelayId`] to assign to the next delay. next_delay_id: DelayId, @@ -401,7 +408,7 @@ impl Notifications { pub fn set_notif_protocol_handshake( &mut self, set_id: sc_peerset::SetId, - handshake_message: impl Into> + handshake_message: impl Into>, ) { if let Some(p) = self.notif_protocols.get_mut(usize::from(set_id)) { *p.handshake.write() = handshake_message.into(); @@ -438,9 +445,10 @@ impl Notifications { &mut self, peer_id: &PeerId, set_id: sc_peerset::SetId, - ban: Option + ban: Option, ) { - let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id.clone(), set_id)) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id.clone(), set_id)) + { entry } else { return @@ -454,11 +462,7 @@ impl Notifications { st @ PeerState::Backoff { .. } => *entry.into_mut() = st, // DisabledPendingEnable => Disabled. - PeerState::DisabledPendingEnable { - connections, - timer_deadline, - timer: _ - } => { + PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); let backoff_until = Some(if let Some(ban) = ban { @@ -466,10 +470,7 @@ impl Notifications { } else { timer_deadline }); - *entry.into_mut() = PeerState::Disabled { - connections, - backoff_until - } + *entry.into_mut() = PeerState::Disabled { connections, backoff_until } }, // Enabled => Disabled. 
@@ -481,15 +482,13 @@ impl Notifications { if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); - let event = NotificationsOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - set_id, - }; + let event = + NotificationsOut::CustomProtocolClosed { peer_id: peer_id.clone(), set_id }; self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } - for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_))) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -500,8 +499,8 @@ impl Notifications { *connec_state = ConnectionState::Closing; } - for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { @@ -512,21 +511,25 @@ impl Notifications { *connec_state = ConnectionState::OpeningThenClosing; } - debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)))); - debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening))); + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::Open(_)))); + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::Opening))); let backoff_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { - connections, - backoff_until - } + *entry.into_mut() = PeerState::Disabled { connections, backoff_until } }, // Incoming => Disabled. // Ongoing opening requests from the remote are rejected. PeerState::Incoming { mut connections, backoff_until } => { - let inc = if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == entry.key().0 && i.set_id == set_id && i.alive) { + let inc = if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == entry.key().0 && i.set_id == set_id && i.alive) + { inc } else { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ @@ -536,7 +539,8 @@ impl Notifications { inc.alive = false; - for (connec_id, connec_state) in connections.iter_mut() + for (connec_id, connec_state) in connections + .iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); @@ -555,11 +559,10 @@ impl Notifications { (None, None) => None, }; - debug_assert!(!connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); - *entry.into_mut() = PeerState::Disabled { - connections, - backoff_until - } + debug_assert!(!connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + *entry.into_mut() = PeerState::Disabled { connections, backoff_until } }, PeerState::Poisoned => @@ -568,14 +571,21 @@ impl Notifications { } /// Returns the list of all the peers that the peerset currently requests us to be connected to. 
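// [Editor's aside -- illustrative sketch, not part of this diff.] The
// `requested_peers` accessor reformatted just below filters the peer map,
// which is keyed by `(PeerId, SetId)` pairs, down to the entries of one set
// whose state the peerset has requested. A reduced mirror with std types
// standing in for `PeerId` / `SetId` / `PeerState`:
//
// use std::collections::HashMap;
//
// enum PeerState { Requested, Disabled }
//
// impl PeerState {
//     fn is_requested(&self) -> bool { matches!(self, PeerState::Requested) }
// }
//
// fn requested_peers<'a>(
//     peers: &'a HashMap<(String, usize), PeerState>,
//     set_id: usize,
// ) -> impl Iterator<Item = &'a String> + 'a {
//     peers
//         .iter()
//         .filter(move |((_, set), state)| *set == set_id && state.is_requested())
//         .map(|((id, _), _)| id)
// }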
- pub fn requested_peers<'a>(&'a self, set_id: sc_peerset::SetId) -> impl Iterator + 'a { - self.peers.iter() + pub fn requested_peers<'a>( + &'a self, + set_id: sc_peerset::SetId, + ) -> impl Iterator + 'a { + self.peers + .iter() .filter(move |((_, set), state)| *set == set_id && state.is_requested()) .map(|((id, _), _)| id) } /// Returns the list of reserved peers. - pub fn reserved_peers<'a>(&'a self, set_id: sc_peerset::SetId) -> impl Iterator + 'a { + pub fn reserved_peers<'a>( + &'a self, + set_id: sc_peerset::SetId, + ) -> impl Iterator + 'a { self.peerset.reserved_peers(set_id) } @@ -595,14 +605,15 @@ impl Notifications { set_id: sc_peerset::SetId, message: impl Into>, ) { - let notifs_sink = match self.peers.get(&(target.clone(), set_id)).and_then(|p| p.get_open()) { + let notifs_sink = match self.peers.get(&(target.clone(), set_id)).and_then(|p| p.get_open()) + { None => { trace!(target: "sub-libp2p", "Tried to sent notification to {:?} without an open channel.", target); return }, - Some(sink) => sink + Some(sink) => sink, }; let message = message.into(); @@ -637,11 +648,11 @@ impl Notifications { // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: entry.key().0.clone(), - condition: DialPeerCondition::Disconnected + condition: DialPeerCondition::Disconnected, }); entry.insert(PeerState::Requested); - return; - } + return + }, }; let now = Instant::now(); @@ -652,10 +663,8 @@ impl Notifications { let peer_id = occ_entry.key().0.clone(); trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Will start to connect at \ until {:?}", peer_id, set_id, timer_deadline); - *occ_entry.into_mut() = PeerState::PendingRequest { - timer: *timer, - timer_deadline: *timer_deadline, - }; + *occ_entry.into_mut() = + PeerState::PendingRequest { timer: *timer, timer_deadline: *timer_deadline }; }, // Backoff (expired) => Requested @@ -666,16 +675,15 @@ impl Notifications { // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: occ_entry.key().0.clone(), - condition: DialPeerCondition::Disconnected + condition: DialPeerCondition::Disconnected, }); *occ_entry.into_mut() = PeerState::Requested; }, // Disabled (with non-expired ban) => DisabledPendingEnable - PeerState::Disabled { - connections, - backoff_until: Some(ref backoff) - } if *backoff > now => { + PeerState::Disabled { connections, backoff_until: Some(ref backoff) } + if *backoff > now => + { let peer_id = occ_entry.key().0.clone(); trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): But peer is backed-off until {:?}", peer_id, set_id, backoff); @@ -683,27 +691,30 @@ impl Notifications { let delay_id = self.next_delay_id; self.next_delay_id.0 += 1; let delay = futures_timer::Delay::new(*backoff - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id, set_id) - }.boxed()); + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { connections, timer: delay_id, timer_deadline: *backoff, }; - }, + } // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { - debug_assert!(!connections.iter().any(|(_, s)| { - matches!(s, ConnectionState::Open(_)) - })); + debug_assert!(!connections + .iter() + .any(|(_, s)| { matches!(s, ConnectionState::Open(_)) })); // The first element of `closed` is chosen to open the notifications substream. 
- if let Some((connec_id, connec_state)) = connections.iter_mut() - .find(|(_, s)| matches!(s, ConnectionState::Closed)) + if let Some((connec_id, connec_state)) = + connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed)) { trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", occ_entry.key().0, set_id); @@ -740,10 +751,13 @@ impl Notifications { self.next_delay_id.0 += 1; debug_assert!(timer_deadline > now); let delay = futures_timer::Delay::new(timer_deadline - now); - self.delays.push(async move { - delay.await; - (delay_id, peer_id, set_id) - }.boxed()); + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { connections, @@ -757,16 +771,22 @@ impl Notifications { PeerState::Incoming { mut connections, .. } => { trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", occ_entry.key().0, set_id); - if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) { + if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) + { inc.alive = false; } else { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") } - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); - for (connec_id, connec_state) in connections.iter_mut() + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections + .iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", @@ -820,7 +840,7 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", entry.key().0, set_id); return - } + }, }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { @@ -836,10 +856,8 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Interrupting pending enabling.", entry.key().0, set_id); - *entry.into_mut() = PeerState::Disabled { - connections, - backoff_until: Some(timer_deadline), - }; + *entry.into_mut() = + PeerState::Disabled { connections, backoff_until: Some(timer_deadline) }; }, // Enabled => Disabled @@ -847,8 +865,10 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.", entry.key().0, set_id); - debug_assert!(connections.iter().any(|(_, s)| - matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); + debug_assert!(connections.iter().any(|(_, s)| matches!( + s, + ConnectionState::Opening | ConnectionState::Open(_) + ))); if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); @@ -859,8 +879,8 @@ impl Notifications { self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } - for (connec_id, connec_state) in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::Opening)) + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Opening)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", entry.key(), *connec_id, set_id); @@ -872,8 +892,8 @@ impl Notifications { *connec_state = ConnectionState::OpeningThenClosing; } - for (connec_id, connec_state) 
in connections.iter_mut() - .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) + for (connec_id, connec_state) in + connections.iter_mut().filter(|(_, s)| matches!(s, ConnectionState::Open(_))) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", entry.key(), *connec_id, set_id); @@ -922,7 +942,8 @@ impl Notifications { /// Function that is called when the peerset wants us to accept a connection /// request from a peer. fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); @@ -933,12 +954,16 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", index, incoming.peer_id, incoming.set_id); match self.peers.get_mut(&(incoming.peer_id.clone(), incoming.set_id)) { - Some(PeerState::DisabledPendingEnable { .. }) | - Some(PeerState::Enabled { .. }) => {} + Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => { + }, _ => { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", incoming.peer_id, incoming.set_id); - self.peerset.dropped(incoming.set_id, incoming.peer_id, sc_peerset::DropReason::Unknown); + self.peerset.dropped( + incoming.set_id, + incoming.peer_id, + sc_peerset::DropReason::Unknown, + ); }, } return @@ -948,8 +973,8 @@ impl Notifications { Some(s) => s, None => { debug_assert!(false); - return; - } + return + }, }; match mem::replace(state, PeerState::Poisoned) { @@ -958,8 +983,11 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", index, incoming.peer_id, incoming.set_id); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); - for (connec_id, connec_state) in connections.iter_mut() + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections + .iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", @@ -973,7 +1001,7 @@ impl Notifications { } *state = PeerState::Enabled { connections }; - } + }, // Any state other than `Incoming` is invalid. peer => { @@ -981,13 +1009,14 @@ impl Notifications { "State mismatch in libp2p: Expected alive incoming. Got {:?}.", peer); debug_assert!(false); - } + }, } } /// Function that is called when the peerset wants us to reject an incoming peer. 
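// [Editor's aside -- illustrative sketch, not part of this diff.] The hunks
// throughout this file drive the peer state machine with one idiom: take
// ownership of the current state via
// `mem::replace(entry.get_mut(), PeerState::Poisoned)`, match on it, and
// write the successor state back. `Poisoned` is only ever observable if a
// transition panics or forgets the write-back. Reduced to two states:
//
// use std::mem;
//
// enum PeerState {
//     Disabled,
//     Enabled,
//     // Placeholder left in the map while a transition is in flight;
//     // observing it afterwards indicates a bug in the state machine.
//     Poisoned,
// }
//
// fn enable(state: &mut PeerState) {
//     // Taking ownership lets the arms move fields (connections, timers, ...)
//     // out of the old state into the successor, as the real code does.
//     match mem::replace(state, PeerState::Poisoned) {
//         PeerState::Disabled => *state = PeerState::Enabled,
//         st @ PeerState::Enabled => *state = st, // already enabled; put it back
//         PeerState::Poisoned => unreachable!("state poisoned by an earlier bug"),
//     }
// }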
fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { self.incoming.remove(pos) } else { error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); @@ -1004,8 +1033,8 @@ impl Notifications { Some(s) => s, None => { debug_assert!(false); - return; - } + return + }, }; match mem::replace(state, PeerState::Poisoned) { @@ -1014,8 +1043,11 @@ impl Notifications { trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", index, incoming.peer_id, incoming.set_id); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); - for (connec_id, connec_state) in connections.iter_mut() + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + for (connec_id, connec_state) in connections + .iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", @@ -1029,10 +1061,10 @@ impl Notifications { } *state = PeerState::Disabled { connections, backoff_until }; - } + }, peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) + peer), } } } @@ -1049,15 +1081,18 @@ impl NetworkBehaviour for Notifications { Vec::new() } - fn inject_connected(&mut self, _: &PeerId) { - } + fn inject_connected(&mut self, _: &PeerId) {} - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { match self.peers.entry((peer_id.clone(), set_id)).or_insert(PeerState::Poisoned) { // Requested | PendingRequest => Enabled - st @ &mut PeerState::Requested | - st @ &mut PeerState::PendingRequest { .. } => { + st @ &mut PeerState::Requested | st @ &mut PeerState::PendingRequest { .. } => { trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.", peer_id, set_id, endpoint @@ -1072,12 +1107,11 @@ impl NetworkBehaviour for Notifications { let mut connections = SmallVec::new(); connections.push((*conn, ConnectionState::Opening)); *st = PeerState::Enabled { connections }; - } + }, // Poisoned gets inserted above if the entry was missing. // Ø | Backoff => Disabled - st @ &mut PeerState::Poisoned | - st @ &mut PeerState::Backoff { .. } => { + st @ &mut PeerState::Poisoned | st @ &mut PeerState::Backoff { .. } => { let backoff_until = if let PeerState::Backoff { timer_deadline, .. } = st { Some(*timer_deadline) } else { @@ -1090,7 +1124,7 @@ impl NetworkBehaviour for Notifications { let mut connections = SmallVec::new(); connections.push((*conn, ConnectionState::Closed)); *st = PeerState::Disabled { connections, backoff_until }; - } + }, // In all other states, add this new connection to the list of closed inactive // connections. @@ -1102,14 +1136,21 @@ impl NetworkBehaviour for Notifications { "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. 
Leaving closed.", peer_id, set_id, endpoint, *conn); connections.push((*conn, ConnectionState::Closed)); - } + }, } } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, _endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + _endpoint: &ConnectedPoint, + ) { for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { - let mut entry = if let Entry::Occupied(entry) = self.peers.entry((peer_id.clone(), set_id)) { + let mut entry = if let Entry::Occupied(entry) = + self.peers.entry((peer_id.clone(), set_id)) + { entry } else { error!(target: "sub-libp2p", "inject_connection_closed: State mismatch in the custom protos handler"); @@ -1139,15 +1180,16 @@ impl NetworkBehaviour for Notifications { self.next_delay_id.0 += 1; let delay = futures_timer::Delay::new(until - now); let peer_id = peer_id.clone(); - self.delays.push(async move { - delay.await; - (delay_id, peer_id, set_id) - }.boxed()); - - *entry.get_mut() = PeerState::Backoff { - timer: delay_id, - timer_deadline: until, - }; + self.delays.push( + async move { + delay.await; + (delay_id, peer_id, set_id) + } + .boxed(), + ); + + *entry.get_mut() = + PeerState::Backoff { timer: delay_id, timer_deadline: until }; } else { entry.remove(); } @@ -1177,13 +1219,15 @@ impl NetworkBehaviour for Notifications { if connections.is_empty() { trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); - self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); + self.peerset.dropped( + set_id, + peer_id.clone(), + sc_peerset::DropReason::Unknown, + ); *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; - } else { - *entry.get_mut() = PeerState::DisabledPendingEnable { - connections, timer_deadline, timer - }; + *entry.get_mut() = + PeerState::DisabledPendingEnable { connections, timer_deadline, timer }; } }, @@ -1195,7 +1239,9 @@ impl NetworkBehaviour for Notifications { peer_id, set_id, *conn ); - debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); + debug_assert!(connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { connections.remove(pos); @@ -1205,16 +1251,18 @@ impl NetworkBehaviour for Notifications { "inject_connection_closed: State mismatch in the custom protos handler"); } - let no_desired_left = !connections.iter().any(|(_, s)| { - matches!(s, ConnectionState::OpenDesiredByRemote) - }); + let no_desired_left = !connections + .iter() + .any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)); // If no connection is `OpenDesiredByRemote` anymore, clean up the peerset incoming // request. if no_desired_left { // In the incoming state, we don't report "Dropped". Instead we will just // ignore the corresponding Accept/Reject. 
-			if let Some(state) = self.incoming.iter_mut()
+			if let Some(state) = self
+				.incoming
+				.iter_mut()
+				.find(|i| i.alive && i.set_id == set_id && i.peer_id == *peer_id)
 			{
 				state.alive = false;
@@ -1233,29 +1281,29 @@ impl NetworkBehaviour for Notifications {
 						self.next_delay_id.0 += 1;
 						let delay = futures_timer::Delay::new(until - now);
 						let peer_id = peer_id.clone();
-						self.delays.push(async move {
-							delay.await;
-							(delay_id, peer_id, set_id)
-						}.boxed());
-
-						*entry.get_mut() = PeerState::Backoff {
-							timer: delay_id,
-							timer_deadline: until,
-						};
+						self.delays.push(
+							async move {
+								delay.await;
+								(delay_id, peer_id, set_id)
+							}
+							.boxed(),
+						);
+
+						*entry.get_mut() =
+							PeerState::Backoff { timer: delay_id, timer_deadline: until };
 					} else {
 						entry.remove();
 					}
 				} else {
 					entry.remove();
 				}
-
 			} else if no_desired_left {
 				// If no connection is `OpenDesiredByRemote` anymore, switch to `Disabled`.
 				*entry.get_mut() = PeerState::Disabled { connections, backoff_until };
 			} else {
 				*entry.get_mut() = PeerState::Incoming { connections, backoff_until };
 			}
-		}
+		},

 		// Enabled => Enabled | Backoff
 		// Peers are always backed-off when disconnecting while Enabled.
@@ -1266,8 +1314,10 @@ impl NetworkBehaviour for Notifications {
 					peer_id, set_id, *conn
 				);
-				debug_assert!(connections.iter().any(|(_, s)|
-					matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+				debug_assert!(connections.iter().any(|(_, s)| matches!(
+					s,
+					ConnectionState::Opening | ConnectionState::Open(_)
+				)));

 				if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) {
 					let (_, state) = connections.remove(pos);
@@ -1275,11 +1325,9 @@ impl NetworkBehaviour for Notifications {
 						if let Some((replacement_pos, replacement_sink)) = connections
 							.iter()
 							.enumerate()
-							.filter_map(|(num, (_, s))| {
-								match s {
-									ConnectionState::Open(s) => Some((num, s.clone())),
-									_ => None
-								}
+							.filter_map(|(num, (_, s))| match s {
+								ConnectionState::Open(s) => Some((num, s.clone())),
+								_ => None,
 							})
 							.next()
 						{
@@ -1294,7 +1342,8 @@ impl NetworkBehaviour for Notifications {
 									set_id,
 									notifications_sink: replacement_sink,
 								};
-								self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
+								self.events
+									.push_back(NetworkBehaviourAction::GenerateEvent(event));
 							}
 						} else {
 							trace!(
@@ -1308,7 +1357,6 @@ impl NetworkBehaviour for Notifications {
 							self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 						}
 					}
-
 				} else {
 					error!(target: "sub-libp2p",
 						"inject_connection_closed: State mismatch in the custom protos handler");
@@ -1317,38 +1365,44 @@ impl NetworkBehaviour for Notifications {

 				if connections.is_empty() {
 					trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
-					self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown);
+					self.peerset.dropped(
+						set_id,
+						peer_id.clone(),
+						sc_peerset::DropReason::Unknown,
+					);
 					let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());

 					let delay_id = self.next_delay_id;
 					self.next_delay_id.0 += 1;
 					let delay = futures_timer::Delay::new(Duration::from_secs(ban_dur));
 					let peer_id = peer_id.clone();
-					self.delays.push(async move {
-						delay.await;
-						(delay_id, peer_id, set_id)
-					}.boxed());
+					self.delays.push(
+						async move {
+							delay.await;
+							(delay_id, peer_id, set_id)
+						}
+						.boxed(),
+					);

 					*entry.get_mut() = PeerState::Backoff {
 						timer: delay_id,
 						timer_deadline: Instant::now() + Duration::from_secs(ban_dur),
 					};
-
-				} else if !connections.iter().any(|(_, s)|
-					matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))
-				{
+				} else if !connections.iter().any(|(_, s)| {
+					matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
+				}) {
 					trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
-					self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown);
-
-					*entry.get_mut() = PeerState::Disabled {
-						connections,
-						backoff_until: None
-					};
+					self.peerset.dropped(
+						set_id,
+						peer_id.clone(),
+						sc_peerset::DropReason::Unknown,
+					);
+					*entry.get_mut() = PeerState::Disabled { connections, backoff_until: None };
 				} else {
 					*entry.get_mut() = PeerState::Enabled { connections };
 				}
-			}
+			},

 			PeerState::Requested |
 			PeerState::PendingRequest { .. } |
@@ -1367,10 +1421,14 @@ impl NetworkBehaviour for Notifications {
 		}
 	}

-	fn inject_disconnected(&mut self, _peer_id: &PeerId) {
-	}
+	fn inject_disconnected(&mut self, _peer_id: &PeerId) {}

-	fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) {
+	fn inject_addr_reach_failure(
+		&mut self,
+		peer_id: Option<&PeerId>,
+		addr: &Multiaddr,
+		error: &dyn error::Error,
+	) {
 		trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error);
 	}

@@ -1386,26 +1444,33 @@ impl NetworkBehaviour for Notifications {
 			},

 			// "Basic" situation: we failed to reach a peer that the peerset requested.
-			st @ PeerState::Requested |
-			st @ PeerState::PendingRequest { .. } => {
+			st @ PeerState::Requested | st @ PeerState::PendingRequest { .. } => {
 				trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id);
-				self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown);
+				self.peerset.dropped(
+					set_id,
+					peer_id.clone(),
+					sc_peerset::DropReason::Unknown,
+				);

 				let now = Instant::now();
 				let ban_duration = match st {
-					PeerState::PendingRequest { timer_deadline, .. } if timer_deadline > now =>
+					PeerState::PendingRequest { timer_deadline, .. }
+						if timer_deadline > now =>
 						cmp::max(timer_deadline - now, Duration::from_secs(5)),
-					_ => Duration::from_secs(5)
+					_ => Duration::from_secs(5),
 				};

 				let delay_id = self.next_delay_id;
 				self.next_delay_id.0 += 1;
 				let delay = futures_timer::Delay::new(ban_duration);
 				let peer_id = peer_id.clone();
-				self.delays.push(async move {
-					delay.await;
-					(delay_id, peer_id, set_id)
-				}.boxed());
+				self.delays.push(
+					async move {
+						delay.await;
+						(delay_id, peer_id, set_id)
+					}
+					.boxed(),
+				);

 				*entry.into_mut() = PeerState::Backoff {
 					timer: delay_id,
@@ -1415,8 +1480,10 @@ impl NetworkBehaviour for Notifications {

 			// We can still get dial failures even if we are already connected to the peer,
 			// as an extra diagnostic for an earlier attempt.
-			st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } |
-			st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => {
+			st @ PeerState::Disabled { .. } |
+			st @ PeerState::Enabled { .. } |
+			st @ PeerState::DisabledPendingEnable { .. } |
+			st @ PeerState::Incoming { .. } => {
 				*entry.into_mut() = st;
 			},

@@ -1429,12 +1496,7 @@ impl NetworkBehaviour for Notifications {
 		}
 	}

-	fn inject_event(
-		&mut self,
-		source: PeerId,
-		connection: ConnectionId,
-		event: NotifsHandlerOut,
-	) {
+	fn inject_event(&mut self, source: PeerId, connection: ConnectionId, event: NotifsHandlerOut) {
 		match event {
 			NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
@@ -1443,20 +1505,24 @@ impl NetworkBehaviour for Notifications {
 					"Handler({:?}, {:?}) => OpenDesiredByRemote({:?})",
 					source, connection, set_id);

-				let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
-					entry
-				} else {
-					error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler");
-					debug_assert!(false);
-					return
-				};
+				let mut entry =
+					if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
+						entry
+					} else {
+						error!(target: "sub-libp2p", "OpenDesiredByRemote: State mismatch in the custom protos handler");
+						debug_assert!(false);
+						return
+					};

 				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 					// Incoming => Incoming
 					PeerState::Incoming { mut connections, backoff_until } => {
-						debug_assert!(connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::OpenDesiredByRemote)));
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+						debug_assert!(connections
+							.iter()
+							.any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)));
+						if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, _)| *c == connection)
+						{
 							if let ConnectionState::Closed = *connec_state {
 								*connec_state = ConnectionState::OpenDesiredByRemote;
 							} else {
@@ -1482,10 +1548,14 @@ impl NetworkBehaviour for Notifications {
 					},

 					PeerState::Enabled { mut connections } => {
-						debug_assert!(connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+						debug_assert!(connections.iter().any(|(_, s)| matches!(
+							s,
+							ConnectionState::Opening | ConnectionState::Open(_)
+						)));

-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+						if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, _)| *c == connection)
+						{
 							if let ConnectionState::Closed = *connec_state {
 								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
 									source, connection, set_id);
@@ -1504,7 +1574,7 @@ impl NetworkBehaviour for Notifications {
 								debug_assert!(matches!(
 									connec_state,
 									ConnectionState::OpenDesiredByRemote |
-									ConnectionState::Closing | ConnectionState::Opening
+										ConnectionState::Closing | ConnectionState::Opening
 								));
 							}
 						} else {
@@ -1520,7 +1590,9 @@ impl NetworkBehaviour for Notifications {

 					// Disabled => Disabled | Incoming
 					PeerState::Disabled { mut connections, backoff_until } => {
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+						if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, _)| *c == connection)
+						{
 							if let ConnectionState::Closed = *connec_state {
 								*connec_state = ConnectionState::OpenDesiredByRemote;

@@ -1537,8 +1609,8 @@ impl NetworkBehaviour for Notifications {
 									incoming_id,
 								});

-								*entry.into_mut() = PeerState::Incoming { connections, backoff_until };
-
+								*entry.into_mut() =
+									PeerState::Incoming { connections, backoff_until };
 							} else {
 								// Connections in `OpeningThenClosing` and `Closing` state can be
 								// in a Closed phase, and as such can emit `OpenDesiredByRemote`
@@ -1548,7 +1620,8 @@ impl NetworkBehaviour for Notifications {
 									connec_state,
 									ConnectionState::OpeningThenClosing | ConnectionState::Closing
 								));
-								*entry.into_mut() = PeerState::Disabled { connections, backoff_until };
+								*entry.into_mut() =
+									PeerState::Disabled { connections, backoff_until };
 							}
 						} else {
 							error!(
@@ -1557,11 +1630,13 @@ impl NetworkBehaviour for Notifications {
 							);
 							debug_assert!(false);
 						}
-					}
+					},

 					// DisabledPendingEnable => Enabled | DisabledPendingEnable
 					PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => {
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) {
+						if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, _)| *c == connection)
+						{
 							if let ConnectionState::Closed = *connec_state {
 								trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})",
 									source, connection, set_id);
@@ -1573,7 +1648,6 @@ impl NetworkBehaviour for Notifications {
 								*connec_state = ConnectionState::Opening;

 								*entry.into_mut() = PeerState::Enabled { connections };
-
 							} else {
 								// Connections in `OpeningThenClosing` and `Closing` state can be
 								// in a Closed phase, and as such can emit `OpenDesiredByRemote`
@@ -1596,7 +1670,7 @@ impl NetworkBehaviour for Notifications {
 							);
 							debug_assert!(false);
 						}
-					}
+					},

 					state => {
 						error!(target: "sub-libp2p",
@@ -1604,9 +1678,9 @@ impl NetworkBehaviour for Notifications {
 							state);
 						debug_assert!(false);
 						return
-					}
+					},
 				};
-			}
+			},

 			NotifsHandlerOut::CloseDesired { protocol_index } => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
@@ -1615,32 +1689,37 @@ impl NetworkBehaviour for Notifications {
 					"Handler({}, {:?}) => CloseDesired({:?})",
 					source, connection, set_id);

-				let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
-					entry
-				} else {
-					error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler");
-					debug_assert!(false);
-					return
-				};
+				let mut entry =
+					if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
+						entry
+					} else {
+						error!(target: "sub-libp2p", "CloseDesired: State mismatch in the custom protos handler");
+						debug_assert!(false);
+						return
+					};

 				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 					// Enabled => Enabled | Disabled
 					PeerState::Enabled { mut connections } => {
-						debug_assert!(connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
+						debug_assert!(connections.iter().any(|(_, s)| matches!(
+							s,
+							ConnectionState::Opening | ConnectionState::Open(_)
+						)));

-						let pos = if let Some(pos) = connections.iter().position(|(c, _)| *c == connection) {
+						let pos = if let Some(pos) =
+							connections.iter().position(|(c, _)| *c == connection)
+						{
 							pos
 						} else {
 							error!(target: "sub-libp2p",
 								"CloseDesired: State mismatch in the custom protos handler");
 							debug_assert!(false);
-							return;
+							return
 						};

 						if matches!(connections[pos].1, ConnectionState::Closing) {
 							*entry.into_mut() = PeerState::Enabled { connections };
-							return;
+							return
 						}

 						debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_)));
@@ -1656,11 +1735,9 @@ impl NetworkBehaviour for Notifications {
 						if let Some((replacement_pos, replacement_sink)) = connections
 							.iter()
 							.enumerate()
-							.filter_map(|(num, (_, s))| {
-								match s {
-									ConnectionState::Open(s) => Some((num, s.clone())),
-									_ => None
-								}
+							.filter_map(|(num, (_, s))| match s {
+								ConnectionState::Open(s) => Some((num, s.clone())),
+								_ => None,
 							})
 							.next()
 						{
@@ -1675,24 +1752,27 @@ impl NetworkBehaviour for Notifications {
 							}

 							*entry.into_mut() = PeerState::Enabled { connections };
-
 						} else {
 							// List of open connections wasn't empty before but now it is.
-							if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) {
+							if !connections
+								.iter()
+								.any(|(_, s)| matches!(s, ConnectionState::Opening))
+							{
 								trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", source, set_id);
-								self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused);
-								*entry.into_mut() = PeerState::Disabled {
-									connections, backoff_until: None
-								};
+								self.peerset.dropped(
+									set_id,
+									source.clone(),
+									sc_peerset::DropReason::Refused,
+								);
+								*entry.into_mut() =
+									PeerState::Disabled { connections, backoff_until: None };
 							} else {
 								*entry.into_mut() = PeerState::Enabled { connections };
 							}

 							trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id);
-							let event = NotificationsOut::CustomProtocolClosed {
-								peer_id: source,
-								set_id,
-							};
+							let event =
+								NotificationsOut::CustomProtocolClosed { peer_id: source, set_id };
 							self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 						}
 					},
@@ -1702,16 +1782,16 @@ impl NetworkBehaviour for Notifications {
 					state @ PeerState::Disabled { .. } |
 					state @ PeerState::DisabledPendingEnable { .. } => {
 						*entry.into_mut() = state;
-						return;
+						return
 					},

 					state => {
 						error!(target: "sub-libp2p",
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 						return
-					}
+					},
 				}
-			}
+			},

 			NotifsHandlerOut::CloseResult { protocol_index } => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
@@ -1726,10 +1806,9 @@ impl NetworkBehaviour for Notifications {
 					Some(PeerState::DisabledPendingEnable { connections, .. }) |
 					Some(PeerState::Disabled { connections, .. }) |
 					Some(PeerState::Enabled { connections, .. }) => {
-						if let Some((_, connec_state)) = connections
-							.iter_mut()
-							.find(|(c, s)| *c == connection && matches!(s, ConnectionState::Closing))
-						{
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
+							*c == connection && matches!(s, ConnectionState::Closing)
+						}) {
 							*connec_state = ConnectionState::Closed;
 						} else {
 							error!(target: "sub-libp2p",
@@ -1743,12 +1822,16 @@ impl NetworkBehaviour for Notifications {
 							"CloseResult: Unexpected state in the custom protos handler: {:?}",
 							state);
 						debug_assert!(false);
-					}
+					},
 				}
-			}
+			},

 			NotifsHandlerOut::OpenResultOk {
-				protocol_index, negotiated_fallback, received_handshake, notifications_sink, ..
+				protocol_index,
+				negotiated_fallback,
+				received_handshake,
+				notifications_sink,
+				..
 			} => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
 				trace!(target: "sub-libp2p",
@@ -1757,13 +1840,16 @@ impl NetworkBehaviour for Notifications {
 				match self.peers.get_mut(&(source.clone(), set_id)) {
 					Some(PeerState::Enabled { connections, .. }) => {
-						debug_assert!(connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
-						let any_open = connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)));
-
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-							*c == connection && matches!(s, ConnectionState::Opening))
-						{
+						debug_assert!(connections.iter().any(|(_, s)| matches!(
+							s,
+							ConnectionState::Opening | ConnectionState::Open(_)
+						)));
+						let any_open =
+							connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_)));
+
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
+							*c == connection && matches!(s, ConnectionState::Opening)
+						}) {
 							if !any_open {
 								trace!(target: "sub-libp2p", "External API <= Open({}, {:?})", source, set_id);
 								let event = NotificationsOut::CustomProtocolOpen {
@@ -1776,9 +1862,10 @@ impl NetworkBehaviour for Notifications {
 								self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 							}
 							*connec_state = ConnectionState::Open(notifications_sink);
-						} else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
-						{
+						} else if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, s)| {
+								*c == connection && matches!(s, ConnectionState::OpeningThenClosing)
+							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
 							debug_assert!(false);
@@ -1790,16 +1877,16 @@ impl NetworkBehaviour for Notifications {
 					Some(PeerState::Incoming { connections, .. }) |
 					Some(PeerState::DisabledPendingEnable { connections, .. }) |
 					Some(PeerState::Disabled { connections, .. }) => {
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
-						{
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
+							*c == connection && matches!(s, ConnectionState::OpeningThenClosing)
+						}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
 							error!(target: "sub-libp2p",
 								"OpenResultOk State mismatch in the custom protos handler");
 							debug_assert!(false);
 						}
-					}
+					},

 					state => {
 						error!(target: "sub-libp2p",
@@ -1807,9 +1894,9 @@ impl NetworkBehaviour for Notifications {
 							state);
 						debug_assert!(false);
 						return
-					}
+					},
 				}
-			}
+			},

 			NotifsHandlerOut::OpenResultErr { protocol_index } => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
@@ -1817,27 +1904,31 @@ impl NetworkBehaviour for Notifications {
 					"Handler({:?}, {:?}) => OpenResultErr({:?})",
 					source, connection, set_id);

-				let mut entry = if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
-					entry
-				} else {
-					error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler");
-					debug_assert!(false);
-					debug_assert!(false);
-					return
-				};
+				let mut entry =
+					if let Entry::Occupied(entry) = self.peers.entry((source.clone(), set_id)) {
+						entry
+					} else {
+						error!(target: "sub-libp2p", "OpenResultErr: State mismatch in the custom protos handler");
+						debug_assert!(false);
+						return
+					};

 				match mem::replace(entry.get_mut(), PeerState::Poisoned) {
 					PeerState::Enabled { mut connections } => {
-						debug_assert!(connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))));
-
-						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-							*c == connection && matches!(s, ConnectionState::Opening))
-						{
+						debug_assert!(connections.iter().any(|(_, s)| matches!(
+							s,
+							ConnectionState::Opening | ConnectionState::Open(_)
+						)));
+
+						if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)| {
+							*c == connection && matches!(s, ConnectionState::Opening)
+						}) {
 							*connec_state = ConnectionState::Closed;
-						} else if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-							*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
-						{
+						} else if let Some((_, connec_state)) =
+							connections.iter_mut().find(|(c, s)| {
+								*c == connection && matches!(s, ConnectionState::OpeningThenClosing)
+							}) {
 							*connec_state = ConnectionState::Closing;
 						} else {
 							error!(target: "sub-libp2p",
@@ -1845,16 +1936,20 @@ impl NetworkBehaviour for Notifications {
 							debug_assert!(false);
 						}

-						if !connections.iter().any(|(_, s)|
-							matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))
-						{
+						if !connections.iter().any(|(_, s)| {
+							matches!(s, ConnectionState::Opening | ConnectionState::Open(_))
+						}) {
 							trace!(target: "sub-libp2p", "PSM <= Dropped({:?})", source);
-							self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused);
+							self.peerset.dropped(
+								set_id,
+								source.clone(),
+								sc_peerset::DropReason::Refused,
+							);

 							let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng());
 							*entry.into_mut() = PeerState::Disabled {
 								connections,
-								backoff_until: Some(Instant::now() + Duration::from_secs(ban_dur))
+								backoff_until: Some(Instant::now() + Duration::from_secs(ban_dur)),
 							};
 						} else {
 							*entry.into_mut() = PeerState::Enabled { connections };
@@ -1867,9 +1962,11 @@ impl NetworkBehaviour for Notifications {
 							PeerState::Incoming { connections, .. } |
 							PeerState::Disabled { connections, .. } |
 							PeerState::DisabledPendingEnable { connections, .. } => {
-								if let Some((_, connec_state)) = connections.iter_mut().find(|(c, s)|
-									*c == connection && matches!(s, ConnectionState::OpeningThenClosing))
-								{
+								if let Some((_, connec_state)) =
+									connections.iter_mut().find(|(c, s)| {
+										*c == connection &&
+											matches!(s, ConnectionState::OpeningThenClosing)
+									}) {
 									*connec_state = ConnectionState::Closing;
 								} else {
 									error!(target: "sub-libp2p",
@@ -1877,20 +1974,22 @@ impl NetworkBehaviour for Notifications {
 									debug_assert!(false);
 								}
 							},
-							_ => unreachable!("Match branches are the same as the one on which we
-								enter this block; qed"),
+							_ => unreachable!(
+								"Match branches are the same as the one on which we
+								enter this block; qed"
+							),
 						};

 						*entry.into_mut() = state;
-					}
+					},

 					state => {
 						error!(target: "sub-libp2p",
 							"Unexpected state in the custom protos handler: {:?}",
 							state);
 						debug_assert!(false);
-					}
+					},
 				};
-			}
+			},

 			NotifsHandlerOut::Notification { protocol_index, message } => {
 				let set_id = sc_peerset::SetId::from(protocol_index);
@@ -1905,11 +2004,7 @@ impl NetworkBehaviour for Notifications {
 					);

 					trace!(target: "sub-libp2p", "External API <= Message({}, {:?})", source, set_id);
-					let event = NotificationsOut::Notification {
-						peer_id: source,
-						set_id,
-						message,
-					};
+					let event = NotificationsOut::Notification { peer_id: source, set_id, message };

 					self.events.push_back(NetworkBehaviourAction::GenerateEvent(event));
 				} else {
@@ -1922,7 +2017,7 @@ impl NetworkBehaviour for Notifications {
 						message.len()
 					);
 				}
-			}
+			},
 		}
 	}

@@ -1930,14 +2025,9 @@ impl NetworkBehaviour for Notifications {
 		&mut self,
 		cx: &mut Context,
 		_params: &mut impl PollParameters,
-	) -> Poll<
-		NetworkBehaviourAction<
-			NotifsHandlerIn,
-			Self::OutEvent,
-		>,
-	> {
+	) -> Poll<NetworkBehaviourAction<NotifsHandlerIn, Self::OutEvent>> {
 		if let Some(event) = self.events.pop_front() {
-			return Poll::Ready(event);
+			return Poll::Ready(event)
 		}

 		// Poll for instructions from the peerset.
@@ -1946,26 +2036,27 @@ impl NetworkBehaviour for Notifications {
 			match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) {
 				Poll::Ready(Some(sc_peerset::Message::Accept(index))) => {
 					self.peerset_report_accept(index);
-				}
+				},
 				Poll::Ready(Some(sc_peerset::Message::Reject(index))) => {
 					self.peerset_report_reject(index);
-				}
+				},
 				Poll::Ready(Some(sc_peerset::Message::Connect { peer_id, set_id, .. })) => {
 					self.peerset_report_connect(peer_id, set_id);
-				}
+				},
 				Poll::Ready(Some(sc_peerset::Message::Drop { peer_id, set_id, .. })) => {
 					self.peerset_report_disconnect(peer_id, set_id);
-				}
+				},
 				Poll::Ready(None) => {
 					error!(target: "sub-libp2p", "Peerset receiver stream has returned None");
-					break;
-				}
+					break
+				},
 				Poll::Pending => break,
 			}
 		}

 		while let Poll::Ready(Some((delay_id, peer_id, set_id))) =
-			Pin::new(&mut self.delays).poll_next(cx) {
+			Pin::new(&mut self.delays).poll_next(cx)
+		{
 			let peer_state = match self.peers.get_mut(&(peer_id.clone(), set_id)) {
 				Some(s) => s,
 				// We intentionally never remove elements from `delays`, and it may
@@ -1977,24 +2068,24 @@ impl NetworkBehaviour for Notifications {
 				PeerState::Backoff { timer, .. } if *timer == delay_id => {
 					trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id);
 					self.peers.remove(&(peer_id, set_id));
-				}
+				},

 				PeerState::PendingRequest { timer, .. } if *timer == delay_id => {
 					trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id);
 					// The `DialPeerCondition` ensures that dial attempts are de-duplicated
 					self.events.push_back(NetworkBehaviourAction::DialPeer {
 						peer_id,
-						condition: DialPeerCondition::Disconnected
+						condition: DialPeerCondition::Disconnected,
 					});
 					*peer_state = PeerState::Requested;
-				}
+				},

 				PeerState::DisabledPendingEnable { connections, timer, timer_deadline }
 					if *timer == delay_id =>
 				{
 					// The first element of `closed` is chosen to open the notifications substream.
-					if let Some((connec_id, connec_state)) = connections.iter_mut()
-						.find(|(_, s)| matches!(s, ConnectionState::Closed))
+					if let Some((connec_id, connec_state)) =
+						connections.iter_mut().find(|(_, s)| matches!(s, ConnectionState::Closed))
 					{
 						trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)",
 							peer_id, *connec_id, set_id);
@@ -2011,10 +2102,13 @@ impl NetworkBehaviour for Notifications {
 						*timer_deadline = Instant::now() + Duration::from_secs(5);
 						let delay = futures_timer::Delay::new(Duration::from_secs(5));
 						let timer = *timer;
-						self.delays.push(async move {
-							delay.await;
-							(timer, peer_id, set_id)
-						}.boxed());
+						self.delays.push(
+							async move {
+								delay.await;
+								(timer, peer_id, set_id)
+							}
+							.boxed(),
+						);
 					}
 				}

@@ -2025,7 +2119,7 @@ impl NetworkBehaviour for Notifications {
 		}

 		if let Some(event) = self.events.pop_front() {
-			return Poll::Ready(event);
+			return Poll::Ready(event)
 		}

 		Poll::Pending
diff --git a/substrate/client/network/src/protocol/notifications/handler.rs b/substrate/client/network/src/protocol/notifications/handler.rs
index 3d38182c3c9d6d6461df3cd4ae9951ca259e2213..0a59b2fcf034abd29497b2552d0f42bdaa75ebac 100644
--- a/substrate/client/network/src/protocol/notifications/handler.rs
+++ b/substrate/client/network/src/protocol/notifications/handler.rs
@@ -57,31 +57,39 @@
 //! It is illegal to send a [`NotifsHandlerIn::Open`] before a previously-emitted
 //! [`NotifsHandlerIn::Open`] has gotten an answer.
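// ---------------------------------------------------------------------------
// A sketch, not part of the patch: the module documentation above says that a
// new `NotifsHandlerIn::Open` may only be sent once the previous one has been
// answered with `OpenResultOk` or `OpenResultErr`. The following minimal,
// self-contained illustration shows that discipline from the caller's side;
// `In`, `Out` and `OpenGuard` are invented stand-ins for this example, not
// types from the handler.

#[allow(dead_code)]
#[derive(Debug)]
enum In {
	Open,
	Close,
}

#[allow(dead_code)]
#[derive(Debug)]
enum Out {
	OpenResultOk,
	OpenResultErr,
	CloseResult,
}

/// Tracks whether an `Open` is currently in flight for one protocol.
struct OpenGuard {
	open_in_flight: bool,
}

impl OpenGuard {
	fn new() -> Self {
		OpenGuard { open_in_flight: false }
	}

	/// Returns the message to send, or `None` if sending `Open` now would
	/// violate the documented contract.
	fn request_open(&mut self) -> Option<In> {
		if self.open_in_flight {
			None // An earlier `Open` has not been answered yet.
		} else {
			self.open_in_flight = true;
			Some(In::Open)
		}
	}

	/// Feeds handler events back in to clear the in-flight flag.
	fn on_event(&mut self, ev: &Out) {
		if matches!(ev, Out::OpenResultOk | Out::OpenResultErr) {
			self.open_in_flight = false;
		}
	}
}

fn main() {
	let mut guard = OpenGuard::new();
	assert!(guard.request_open().is_some());
	assert!(guard.request_open().is_none()); // A second `Open` is refused.
	guard.on_event(&Out::OpenResultErr);
	assert!(guard.request_open().is_some()); // Allowed again after an answer.
}
// ---------------------------------------------------------------------------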

-use crate::protocol::notifications::{
-	upgrade::{
-		NotificationsIn, NotificationsOut, NotificationsInSubstream, NotificationsOutSubstream,
-		NotificationsHandshakeError, UpgradeCollec
-	},
+use crate::protocol::notifications::upgrade::{
+	NotificationsHandshakeError, NotificationsIn, NotificationsInSubstream, NotificationsOut,
+	NotificationsOutSubstream, UpgradeCollec,
 };
 use bytes::BytesMut;
-use libp2p::core::{ConnectedPoint, PeerId, upgrade::{InboundUpgrade, OutboundUpgrade}};
-use libp2p::swarm::{
-	ProtocolsHandler, ProtocolsHandlerEvent,
-	IntoProtocolsHandler,
-	KeepAlive,
-	ProtocolsHandlerUpgrErr,
-	SubstreamProtocol,
-	NegotiatedSubstream,
-};
 use futures::{
 	channel::mpsc,
 	lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard},
-	prelude::*
+	prelude::*,
+};
+use libp2p::{
+	core::{
+		upgrade::{InboundUpgrade, OutboundUpgrade},
+		ConnectedPoint, PeerId,
+	},
+	swarm::{
+		IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler,
+		ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
+	},
 };
 use log::error;
 use parking_lot::{Mutex, RwLock};
-use std::{borrow::Cow, collections::VecDeque, mem, pin::Pin, str, sync::Arc, task::{Context, Poll}, time::Duration};
+use std::{
+	borrow::Cow,
+	collections::VecDeque,
+	mem,
+	pin::Pin,
+	str,
+	sync::Arc,
+	task::{Context, Poll},
+	time::Duration,
+};
 use wasm_timer::Instant;

 /// Number of pending notifications in asynchronous contexts.
@@ -131,7 +139,7 @@ pub struct NotifsHandler {
 	/// Events to return in priority from `poll`.
 	events_queue: VecDeque<
-		ProtocolsHandlerEvent<NotificationsOut, usize, NotifsHandlerOut, NotifsHandlerError>
+		ProtocolsHandlerEvent<NotificationsOut, usize, NotifsHandlerOut, NotifsHandlerError>,
 	>,
 }

@@ -195,10 +203,12 @@ enum State {
 		/// We use two different channels in order to have two different channel sizes, but from
 		/// the receiving point of view, the two channels are the same.
 		/// The receivers are fused in case the user drops the [`NotificationsSink`] entirely.
-		notifications_sink_rx: stream::Peekable<stream::Select<
-			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
-			stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>
-		>>,
+		notifications_sink_rx: stream::Peekable<
+			stream::Select<
+				stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
+				stream::Fuse<mpsc::Receiver<NotificationsSinkMessage>>,
+			>,
+		>,

 		/// Outbound substream that has been accepted by the remote.
 		///
@@ -220,28 +230,33 @@ impl IntoProtocolsHandler for NotifsHandlerProto {
 	type Handler = NotifsHandler;

 	fn inbound_protocol(&self) -> UpgradeCollec<NotificationsIn> {
-		self.protocols.iter()
-			.map(|cfg| NotificationsIn::new(cfg.name.clone(), cfg.fallback_names.clone(), cfg.max_notification_size))
+		self.protocols
+			.iter()
+			.map(|cfg| {
+				NotificationsIn::new(
+					cfg.name.clone(),
+					cfg.fallback_names.clone(),
+					cfg.max_notification_size,
+				)
+			})
 			.collect::<UpgradeCollec<_>>()
 	}

 	fn into_handler(self, peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler {
 		NotifsHandler {
-			protocols: self.protocols.into_iter().map(|config| {
-				let in_upgrade = NotificationsIn::new(
-					config.name.clone(),
-					config.fallback_names.clone(),
-					config.max_notification_size
-				);
-
-				Protocol {
-					config,
-					in_upgrade,
-					state: State::Closed {
-						pending_opening: false,
-					},
-				}
-			}).collect(),
+			protocols: self
+				.protocols
+				.into_iter()
+				.map(|config| {
+					let in_upgrade = NotificationsIn::new(
+						config.name.clone(),
+						config.fallback_names.clone(),
+						config.max_notification_size,
+					);
+
+					Protocol { config, in_upgrade, state: State::Closed { pending_opening: false } }
+				})
+				.collect(),
 			peer_id: peer_id.clone(),
 			endpoint: connected_point.clone(),
 			when_connection_open: Instant::now(),
@@ -363,9 +378,7 @@ struct NotificationsSinkInner {
 enum NotificationsSinkMessage {
 	/// Message emitted by [`NotificationsSink::reserve_notification`] and
 	/// [`NotificationsSink::write_notification_now`].
-	Notification {
-		message: Vec<u8>,
-	},
+	Notification { message: Vec<u8> },

 	/// Must close the connection.
 	ForceClose,
@@ -386,14 +399,10 @@ impl NotificationsSink {
 	/// error to send a notification using an unknown protocol.
 	///
 	/// This method will be removed in a future version.
-	pub fn send_sync_notification<'a>(
-		&'a self,
-		message: impl Into<Vec<u8>>
-	) {
+	pub fn send_sync_notification<'a>(&'a self, message: impl Into<Vec<u8>>) {
 		let mut lock = self.inner.sync_channel.lock();
-		let result = lock.try_send(NotificationsSinkMessage::Notification {
-			message: message.into()
-		});
+		let result =
+			lock.try_send(NotificationsSinkMessage::Notification { message: message.into() });

 		if result.is_err() {
 			// Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the
@@ -433,13 +442,10 @@ impl<'a> Ready<'a> {
 	/// Consumes this slot's reservation and actually queues the notification.
 	///
 	/// Returns an error if the substream has been closed.
-	pub fn send(
-		mut self,
-		notification: impl Into<Vec<u8>>
-	) -> Result<(), ()> {
-		self.lock.start_send(NotificationsSinkMessage::Notification {
-			message: notification.into(),
-		}).map_err(|_| ())
+	pub fn send(mut self, notification: impl Into<Vec<u8>>) -> Result<(), ()> {
+		self.lock
+			.start_send(NotificationsSinkMessage::Notification { message: notification.into() })
+			.map_err(|_| ())
 	}
 }

@@ -457,12 +463,8 @@ impl NotifsHandlerProto {
 	/// handshake, and the maximum allowed size of a notification. At the moment, the message
 	/// is always the same whether we open a substream ourselves or respond to handshake from
 	/// the remote.
-	pub fn new(
-		list: impl Into<Vec<ProtocolConfig>>,
-	) -> Self {
-		NotifsHandlerProto {
-			protocols: list.into(),
-		}
+	pub fn new(list: impl Into<Vec<ProtocolConfig>>) -> Self {
+		NotifsHandlerProto { protocols: list.into() }
 	}
 }

@@ -477,7 +479,9 @@ impl ProtocolsHandler for NotifsHandler {
 	type InboundOpenInfo = ();

 	fn listen_protocol(&self) -> SubstreamProtocol<UpgradeCollec<NotificationsIn>, ()> {
-		let protocols = self.protocols.iter()
+		let protocols = self
+			.protocols
+			.iter()
 			.map(|p| p.in_upgrade.clone())
 			.collect::<UpgradeCollec<_>>();

@@ -486,17 +490,16 @@ impl ProtocolsHandler for NotifsHandler {

 	fn inject_fully_negotiated_inbound(
 		&mut self,
-		(mut in_substream_open, protocol_index):
-			<Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-		(): ()
+		(mut in_substream_open, protocol_index): <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
+		(): (),
 	) {
 		let mut protocol_info = &mut self.protocols[protocol_index];
 		match protocol_info.state {
 			State::Closed { pending_opening } => {
 				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
-					NotifsHandlerOut::OpenDesiredByRemote {
-						protocol_index,
-					}
+					NotifsHandlerOut::OpenDesiredByRemote { protocol_index },
 				));

 				protocol_info.state = State::OpenDesiredByRemote {
@@ -512,13 +515,13 @@ impl ProtocolsHandler for NotifsHandler {
 				// in mind that it is invalid for the remote to open multiple such
 				// substreams, and therefore sending a "RST" is the most correct thing
 				// to do.
-				return;
+				return
 			},
 			State::Opening { ref mut in_substream, .. } |
 			State::Open { ref mut in_substream, .. } => {
 				if in_substream.is_some() {
 					// Same remark as above.
-					return;
+					return
 				}

 				// Create `handshake_message` on a separate line to be sure that the
@@ -533,18 +536,18 @@ impl ProtocolsHandler for NotifsHandler {
 	fn inject_fully_negotiated_outbound(
 		&mut self,
 		new_open: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
-		protocol_index: Self::OutboundOpenInfo
+		protocol_index: Self::OutboundOpenInfo,
 	) {
 		match self.protocols[protocol_index].state {
 			State::Closed { ref mut pending_opening } |
 			State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
 				debug_assert!(*pending_opening);
 				*pending_opening = false;
-			}
+			},
 			State::Open { .. } => {
 				error!(target: "sub-libp2p", "☎️ State mismatch in notifications handler");
 				debug_assert!(false);
-			}
+			},
 			State::Opening { ref mut in_substream } => {
 				let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE);
 				let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE);
@@ -557,7 +560,8 @@ impl ProtocolsHandler for NotifsHandler {
 				};

 				self.protocols[protocol_index].state = State::Open {
-					notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse()).peekable(),
+					notifications_sink_rx: stream::select(async_rx.fuse(), sync_rx.fuse())
+						.peekable(),
 					out_substream: Some(new_open.substream),
 					in_substream: in_substream.take(),
 				};
@@ -568,10 +572,10 @@ impl ProtocolsHandler for NotifsHandler {
 						negotiated_fallback: new_open.negotiated_fallback,
 						endpoint: self.endpoint.clone(),
 						received_handshake: new_open.handshake,
-						notifications_sink
-					}
+						notifications_sink,
+					},
 				));
-			}
+			},
 		}
 	}

@@ -586,18 +590,18 @@ impl ProtocolsHandler for NotifsHandler {
 						protocol_info.config.name.clone(),
 						protocol_info.config.fallback_names.clone(),
 						protocol_info.config.handshake.read().clone(),
-						protocol_info.config.max_notification_size
+						protocol_info.config.max_notification_size,
 					);

-					self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-						protocol: SubstreamProtocol::new(proto, protocol_index)
-							.with_timeout(OPEN_TIMEOUT),
-					});
+					self.events_queue.push_back(
+						ProtocolsHandlerEvent::OutboundSubstreamRequest {
+							protocol: SubstreamProtocol::new(proto, protocol_index)
+								.with_timeout(OPEN_TIMEOUT),
+						},
+					);
 				}

-				protocol_info.state = State::Opening {
-					in_substream: None,
-				};
+				protocol_info.state = State::Opening { in_substream: None };
 			},
 			State::OpenDesiredByRemote { pending_opening, in_substream } => {
 				let handshake_message = protocol_info.config.handshake.read().clone();
@@ -610,27 +614,27 @@ impl ProtocolsHandler for NotifsHandler {
 						protocol_info.config.max_notification_size,
 					);

-					self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest {
-						protocol: SubstreamProtocol::new(proto, protocol_index)
-							.with_timeout(OPEN_TIMEOUT),
-					});
+					self.events_queue.push_back(
+						ProtocolsHandlerEvent::OutboundSubstreamRequest {
+							protocol: SubstreamProtocol::new(proto, protocol_index)
+								.with_timeout(OPEN_TIMEOUT),
+						},
+					);
 				}

 				in_substream.send_handshake(handshake_message);

 				// The state change is done in two steps because of borrowing issues.
-				let in_substream = match
-					mem::replace(&mut protocol_info.state, State::Opening { in_substream: None })
-				{
+				let in_substream = match mem::replace(
+					&mut protocol_info.state,
+					State::Opening { in_substream: None },
+				) {
 					State::OpenDesiredByRemote { in_substream, .. } => in_substream,
-					_ => unreachable!()
-				};
-				protocol_info.state = State::Opening {
-					in_substream: Some(in_substream),
+					_ => unreachable!(),
 				};
+				protocol_info.state = State::Opening { in_substream: Some(in_substream) };
 			},
-			State::Opening { .. } |
-			State::Open { .. } => {
+			State::Opening { .. } | State::Open { .. } => {
 				// As documented, it is forbidden to send an `Open` while there is already
 				// one in the fly.
 				error!(target: "sub-libp2p", "opening already-opened handler");
@@ -642,34 +646,26 @@ impl ProtocolsHandler for NotifsHandler {
 			NotifsHandlerIn::Close { protocol_index } => {
 				match self.protocols[protocol_index].state {
 					State::Open { .. } => {
-						self.protocols[protocol_index].state = State::Closed {
-							pending_opening: false,
-						};
+						self.protocols[protocol_index].state =
+							State::Closed { pending_opening: false };
 					},
 					State::Opening { .. } => {
-						self.protocols[protocol_index].state = State::Closed {
-							pending_opening: true,
-						};
+						self.protocols[protocol_index].state =
+							State::Closed { pending_opening: true };
 						self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
-							NotifsHandlerOut::OpenResultErr {
-								protocol_index,
-							}
+							NotifsHandlerOut::OpenResultErr { protocol_index },
 						));
 					},
 					State::OpenDesiredByRemote { pending_opening, .. } => {
-						self.protocols[protocol_index].state = State::Closed {
-							pending_opening,
-						};
-					}
+						self.protocols[protocol_index].state = State::Closed { pending_opening };
+					},
 					State::Closed { .. } => {},
 				}

-				self.events_queue.push_back(
-					ProtocolsHandlerEvent::Custom(NotifsHandlerOut::CloseResult {
-						protocol_index,
-					})
-				);
+				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
+					NotifsHandlerOut::CloseResult { protocol_index },
+				));
 			},
 		}
 	}

@@ -677,26 +673,22 @@ impl ProtocolsHandler for NotifsHandler {
 	fn inject_dial_upgrade_error(
 		&mut self,
 		num: usize,
-		_: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>
+		_: ProtocolsHandlerUpgrErr<NotificationsHandshakeError>,
 	) {
 		match self.protocols[num].state {
 			State::Closed { ref mut pending_opening } |
 			State::OpenDesiredByRemote { ref mut pending_opening, .. } => {
 				debug_assert!(*pending_opening);
 				*pending_opening = false;
-			}
+			},

 			State::Opening { .. } => {
-				self.protocols[num].state = State::Closed {
-					pending_opening: false,
-				};
+				self.protocols[num].state = State::Closed { pending_opening: false };
 				self.events_queue.push_back(ProtocolsHandlerEvent::Custom(
-					NotifsHandlerOut::OpenResultErr {
-						protocol_index: num,
-					}
+					NotifsHandlerOut::OpenResultErr { protocol_index: num },
 				));
-			}
+			},

 			// No substream is being open when already `Open`.
 			State::Open { .. } => debug_assert!(false),
@@ -706,7 +698,7 @@ impl ProtocolsHandler for NotifsHandler {
 	fn connection_keep_alive(&self) -> KeepAlive {
 		// `Yes` if any protocol has some activity.
 		if self.protocols.iter().any(|p| !matches!(p.state, State::Closed { .. })) {
-			return KeepAlive::Yes;
+			return KeepAlive::Yes
 		}

 		// A grace period of `INITIAL_KEEPALIVE_TIME` must be given to leave time for the remote
@@ -718,28 +710,33 @@ impl ProtocolsHandler for NotifsHandler {
 		&mut self,
 		cx: &mut Context,
 	) -> Poll<
-		ProtocolsHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::OutEvent, Self::Error>
+		ProtocolsHandlerEvent<
+			Self::OutboundProtocol,
+			Self::OutboundOpenInfo,
+			Self::OutEvent,
+			Self::Error,
+		>,
 	> {
 		if let Some(ev) = self.events_queue.pop_front() {
-			return Poll::Ready(ev);
+			return Poll::Ready(ev)
 		}

 		// For each open substream, try send messages from `notifications_sink_rx` to the
 		// substream.
 		for protocol_index in 0..self.protocols.len() {
-			if let State::Open { notifications_sink_rx, out_substream: Some(out_substream), .. }
-				= &mut self.protocols[protocol_index].state
+			if let State::Open {
+				notifications_sink_rx, out_substream: Some(out_substream), ..
+			} = &mut self.protocols[protocol_index].state
 			{
 				loop {
 					// Only proceed with `out_substream.poll_ready_unpin` if there is an element
 					// available in `notifications_sink_rx`. This avoids waking up the task when
 					// a substream is ready to send if there isn't actually something to send.
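// ---------------------------------------------------------------------------
// A sketch, not part of the patch, of the peek-before-send pattern that the
// comment above describes: only touch the outbound substream once `poll_peek`
// confirms a queued message, so a writable-but-idle substream never wakes the
// task. Plain `futures` channels stand in for the handler's actual plumbing.

use futures::{channel::mpsc, SinkExt, StreamExt};
use std::pin::Pin;

fn main() {
	futures::executor::block_on(async {
		let (mut tx, rx) = mpsc::channel::<Vec<u8>>(8);
		let mut rx = rx.peekable();

		tx.send(b"hello".to_vec()).await.unwrap();

		// `peek` resolves only once an element is available; until then the
		// real code would not call `poll_ready_unpin` on the substream at all.
		let mut rx = Pin::new(&mut rx);
		if rx.as_mut().peek().await.is_some() {
			// Only now check the sink for readiness and dequeue the message.
			let msg = rx.next().await.unwrap();
			assert_eq!(msg, b"hello".to_vec());
		}
	});
}
// ---------------------------------------------------------------------------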
 					match Pin::new(&mut *notifications_sink_rx).as_mut().poll_peek(cx) {
-						Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) => {
-							return Poll::Ready(
-								ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)
-							);
-						},
+						Poll::Ready(Some(&NotificationsSinkMessage::ForceClose)) =>
+							return Poll::Ready(ProtocolsHandlerEvent::Close(
+								NotifsHandlerError::SyncNotificationsClogged,
+							)),
 						Poll::Ready(Some(&NotificationsSinkMessage::Notification { .. })) => {},
 						Poll::Ready(None) | Poll::Pending => break,
 					}
@@ -748,19 +745,20 @@ impl ProtocolsHandler for NotifsHandler {
 					// substream is ready to accept a message.
 					match out_substream.poll_ready_unpin(cx) {
 						Poll::Ready(_) => {},
-						Poll::Pending => break
+						Poll::Pending => break,
 					}

 					// Now that the substream is ready for a message, grab what to send.
 					let message = match notifications_sink_rx.poll_next_unpin(cx) {
-						Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) => message,
-						Poll::Ready(Some(NotificationsSinkMessage::ForceClose))
-						| Poll::Ready(None)
-						| Poll::Pending => {
+						Poll::Ready(Some(NotificationsSinkMessage::Notification { message })) =>
+							message,
+						Poll::Ready(Some(NotificationsSinkMessage::ForceClose)) |
+						Poll::Ready(None) |
+						Poll::Pending => {
 							// Should never be reached, as per `poll_peek` above.
 							debug_assert!(false);
-							break;
-						}
+							break
+						},
 					};

 					let _ = out_substream.start_send_unpin(message);
@@ -784,15 +782,15 @@ impl ProtocolsHandler for NotifsHandler {
 					Poll::Ready(Err(_)) => {
 						*out_substream = None;
 						let event = NotifsHandlerOut::CloseDesired { protocol_index };
-						return Poll::Ready(ProtocolsHandlerEvent::Custom(event));
-					}
+						return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
+					},
 				};
-			}
+			},

 			State::Closed { .. } |
 			State::Opening { .. } |
 			State::Open { out_substream: None, .. } |
-			State::OpenDesiredByRemote { .. } => {}
+			State::OpenDesiredByRemote { .. } => {},
 		}
 	}

@@ -803,45 +801,40 @@ impl ProtocolsHandler for NotifsHandler {
 		match &mut self.protocols[protocol_index].state {
 			State::Closed { .. } |
 			State::Open { in_substream: None, .. } |
-			State::Opening { in_substream: None } => {}
+			State::Opening { in_substream: None } => {},

-			State::Open { in_substream: in_substream @ Some(_), .. } => {
+			State::Open { in_substream: in_substream @ Some(_), .. } =>
 				match Stream::poll_next(Pin::new(in_substream.as_mut().unwrap()), cx) {
 					Poll::Pending => {},
 					Poll::Ready(Some(Ok(message))) => {
-						let event = NotifsHandlerOut::Notification {
-							protocol_index,
-							message,
-						};
+						let event = NotifsHandlerOut::Notification { protocol_index, message };
 						return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
 					},
-					Poll::Ready(None) | Poll::Ready(Some(Err(_))) =>
-						*in_substream = None,
-				}
-			}
+					Poll::Ready(None) | Poll::Ready(Some(Err(_))) => *in_substream = None,
+				},

-			State::OpenDesiredByRemote { in_substream, pending_opening } => {
+			State::OpenDesiredByRemote { in_substream, pending_opening } =>
 				match NotificationsInSubstream::poll_process(Pin::new(in_substream), cx) {
 					Poll::Pending => {},
 					Poll::Ready(Ok(void)) => match void {},
 					Poll::Ready(Err(_)) => {
-						self.protocols[protocol_index].state = State::Closed {
-							pending_opening: *pending_opening,
-						};
+						self.protocols[protocol_index].state =
+							State::Closed { pending_opening: *pending_opening };
 						return Poll::Ready(ProtocolsHandlerEvent::Custom(
-							NotifsHandlerOut::CloseDesired { protocol_index }
+							NotifsHandlerOut::CloseDesired { protocol_index },
 						))
 					},
-				}
-			}
+				},

-			State::Opening { in_substream: in_substream @ Some(_), .. } => {
-				match NotificationsInSubstream::poll_process(Pin::new(in_substream.as_mut().unwrap()), cx) {
+			State::Opening { in_substream: in_substream @ Some(_), .. } =>
+				match NotificationsInSubstream::poll_process(
+					Pin::new(in_substream.as_mut().unwrap()),
+					cx,
+				) {
 					Poll::Pending => {},
 					Poll::Ready(Ok(void)) => match void {},
 					Poll::Ready(Err(_)) => *in_substream = None,
-				}
-			}
+				},
 		}
 	}
diff --git a/substrate/client/network/src/protocol/notifications/tests.rs b/substrate/client/network/src/protocol/notifications/tests.rs
index 4c7461c94b20d33360a8f272deb2e3781279a545..a80315050830b8a3bfdb03147375f5e3c259f58e 100644
--- a/substrate/client/network/src/protocol/notifications/tests.rs
+++ b/substrate/client/network/src/protocol/notifications/tests.rs
@@ -21,19 +21,24 @@
 use crate::protocol::notifications::{Notifications, NotificationsOut, ProtocolConfig};

 use futures::prelude::*;
-use libp2p::{PeerId, Multiaddr, Transport};
-use libp2p::core::{
-	connection::{ConnectionId, ListenerId},
-	ConnectedPoint,
-	transport::MemoryTransport,
-	upgrade
+use libp2p::{
+	core::{
+		connection::{ConnectionId, ListenerId},
+		transport::MemoryTransport,
+		upgrade, ConnectedPoint,
+	},
+	identity, noise,
+	swarm::{
+		IntoProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
+		ProtocolsHandler, Swarm,
+	},
+	yamux, Multiaddr, PeerId, Transport,
 };
-use libp2p::{identity, noise, yamux};
-use libp2p::swarm::{
-	Swarm, ProtocolsHandler, IntoProtocolsHandler, PollParameters,
-	NetworkBehaviour, NetworkBehaviourAction
+use std::{
+	error, io, iter,
+	task::{Context, Poll},
+	time::Duration,
 };
-use std::{error, io, iter, task::{Context, Poll}, time::Duration};

 /// Builds two nodes that have each other as bootstrap nodes.
 /// This is to be used only for testing, and a panic will happen if something goes wrong.
@@ -45,12 +50,11 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 		.map(|_| format!("/memory/{}", rand::random::<u64>()).parse().unwrap())
 		.collect();

-	for index in 0 .. 2 {
+	for index in 0..2 {
 		let keypair = keypairs[index].clone();

-		let noise_keys = noise::Keypair::<noise::X25519Spec>::new()
-			.into_authentic(&keypair)
-			.unwrap();
+		let noise_keys =
+			noise::Keypair::<noise::X25519Spec>::new().into_authentic(&keypair).unwrap();

 		let transport = MemoryTransport
 			.upgrade(upgrade::Version::V1)
@@ -60,48 +64,43 @@ fn build_nodes() -> (Swarm<CustomProtoWithAddr>, Swarm<CustomProtoWithAddr>) {
 			.boxed();

 		let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig {
-			sets: vec![
-				sc_peerset::SetConfig {
-					in_peers: 25,
-					out_peers: 25,
-					bootnodes: if index == 0 {
-						keypairs
-							.iter()
-							.skip(1)
-							.map(|keypair| keypair.public().into_peer_id())
-							.collect()
-					} else {
-						vec![]
-					},
-					reserved_nodes: Default::default(),
-					reserved_only: false,
-				}
-			],
+			sets: vec![sc_peerset::SetConfig {
+				in_peers: 25,
+				out_peers: 25,
+				bootnodes: if index == 0 {
+					keypairs.iter().skip(1).map(|keypair| keypair.public().into_peer_id()).collect()
+				} else {
+					vec![]
+				},
+				reserved_nodes: Default::default(),
+				reserved_only: false,
+			}],
 		});

 		let behaviour = CustomProtoWithAddr {
-			inner: Notifications::new(peerset, iter::once(ProtocolConfig {
-				name: "/foo".into(),
-				fallback_names: Vec::new(),
-				handshake: Vec::new(),
-				max_notification_size: 1024 * 1024
-			})),
+			inner: Notifications::new(
+				peerset,
+				iter::once(ProtocolConfig {
+					name: "/foo".into(),
+					fallback_names: Vec::new(),
+					handshake: Vec::new(),
+					max_notification_size: 1024 * 1024,
+				}),
+			),
 			addrs: addrs
 				.iter()
 				.enumerate()
-				.filter_map(|(n, a)| if n != index {
-					Some((keypairs[n].public().into_peer_id(), a.clone()))
-				} else {
-					None
+				.filter_map(|(n, a)| {
+					if n != index {
+						Some((keypairs[n].public().into_peer_id(), a.clone()))
+					} else {
+						None
+					}
 				})
 				.collect(),
 		};

-		let mut swarm = Swarm::new(
-			transport,
-			behaviour,
-			keypairs[index].public().into_peer_id()
-		);
+		let mut swarm = Swarm::new(transport, behaviour, keypairs[index].public().into_peer_id());
 		swarm.listen_on(addrs[index].clone()).unwrap();
 		out.push(swarm);
 	}
@@ -159,11 +158,21 @@ impl NetworkBehaviour for CustomProtoWithAddr {
 		self.inner.inject_disconnected(peer_id)
 	}

-	fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) {
+	fn inject_connection_established(
+		&mut self,
+		peer_id: &PeerId,
+		conn: &ConnectionId,
+		endpoint: &ConnectedPoint,
+	) {
 		self.inner.inject_connection_established(peer_id, conn, endpoint)
 	}

-	fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) {
+	fn inject_connection_closed(
+		&mut self,
+		peer_id: &PeerId,
+		conn: &ConnectionId,
+		endpoint: &ConnectedPoint,
+	) {
 		self.inner.inject_connection_closed(peer_id, conn, endpoint)
 	}

@@ -171,7 +180,7 @@ impl NetworkBehaviour for CustomProtoWithAddr {
 		&mut self,
 		peer_id: PeerId,
 		connection: ConnectionId,
-		event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent
+		event: <<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::OutEvent,
 	) {
 		self.inner.inject_event(peer_id, connection, event)
 	}

@@ -185,11 +194,16 @@ impl NetworkBehaviour for CustomProtoWithAddr {
 			<<Self::ProtocolsHandler as IntoProtocolsHandler>::Handler as ProtocolsHandler>::InEvent,
 			Self::OutEvent
 		>
-	> {
+	>{
 		self.inner.poll(cx, params)
 	}

-	fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) {
+	fn inject_addr_reach_failure(
+		&mut self,
+		peer_id: Option<&PeerId>,
+		addr: &Multiaddr,
+		error: &dyn std::error::Error,
+	) {
 		self.inner.inject_addr_reach_failure(peer_id, addr, error)
 	}

@@ -235,7 +249,12 @@ fn reconnect_after_disconnect() {

 	// For this test, the services can be in the following states.
 	#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-	enum ServiceState { NotConnected, FirstConnec, Disconnected, ConnectedAgain }
+	enum ServiceState {
+		NotConnected,
+		FirstConnec,
+		Disconnected,
+		ConnectedAgain,
+	}
 	let mut service1_state = ServiceState::NotConnected;
 	let mut service2_state = ServiceState::NotConnected;

@@ -253,55 +272,55 @@ fn reconnect_after_disconnect() {
 		};

 		match event {
-			future::Either::Left(NotificationsOut::CustomProtocolOpen { .. }) => {
+			future::Either::Left(NotificationsOut::CustomProtocolOpen { .. }) =>
 				match service1_state {
 					ServiceState::NotConnected => {
 						service1_state = ServiceState::FirstConnec;
 						if service2_state == ServiceState::FirstConnec {
 							service1.behaviour_mut().disconnect_peer(
 								Swarm::local_peer_id(&service2),
-								sc_peerset::SetId::from(0)
+								sc_peerset::SetId::from(0),
 							);
 						}
 					},
 					ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain,
 					ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(),
-				}
-			},
+				},
-			future::Either::Left(NotificationsOut::CustomProtocolClosed { .. }) => {
+			future::Either::Left(NotificationsOut::CustomProtocolClosed { .. }) =>
 				match service1_state {
 					ServiceState::FirstConnec => service1_state = ServiceState::Disconnected,
-					ServiceState::ConnectedAgain| ServiceState::NotConnected |
+					ServiceState::ConnectedAgain |
+					ServiceState::NotConnected |
 					ServiceState::Disconnected => panic!(),
-				}
-			},
+				},
-			future::Either::Right(NotificationsOut::CustomProtocolOpen { .. }) => {
+			future::Either::Right(NotificationsOut::CustomProtocolOpen { .. }) =>
 				match service2_state {
 					ServiceState::NotConnected => {
 						service2_state = ServiceState::FirstConnec;
 						if service1_state == ServiceState::FirstConnec {
 							service1.behaviour_mut().disconnect_peer(
 								Swarm::local_peer_id(&service2),
-								sc_peerset::SetId::from(0)
+								sc_peerset::SetId::from(0),
 							);
 						}
 					},
 					ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain,
 					ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(),
-				}
-			},
+				},
-			future::Either::Right(NotificationsOut::CustomProtocolClosed { .. }) => {
+			future::Either::Right(NotificationsOut::CustomProtocolClosed { .. }) =>
 				match service2_state {
 					ServiceState::FirstConnec => service2_state = ServiceState::Disconnected,
-					ServiceState::ConnectedAgain| ServiceState::NotConnected |
+					ServiceState::ConnectedAgain |
+					ServiceState::NotConnected |
 					ServiceState::Disconnected => panic!(),
-				}
-			},
+				},
-			_ => {}
+			_ => {},
 		}

-		if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain {
-			break;
+		if service1_state == ServiceState::ConnectedAgain &&
+			service2_state == ServiceState::ConnectedAgain
+		{
+			break
 		}
 	}

@@ -316,7 +335,7 @@ fn reconnect_after_disconnect() {
 			let s2 = service2.next();
 			futures::pin_mut!(s1, s2);
 			match future::select(future::select(s1, s2), &mut delay).await {
-				future::Either::Right(_) => break,	// success
+				future::Either::Right(_) => break, // success
 				future::Either::Left((future::Either::Left((ev, _)), _)) => ev,
 				future::Either::Left((future::Either::Right((ev, _)), _)) => ev,
 			}
@@ -325,7 +344,7 @@ fn reconnect_after_disconnect() {
 		match event {
 			NotificationsOut::CustomProtocolOpen { .. } |
 			NotificationsOut::CustomProtocolClosed { .. } => panic!(),
-			_ => {}
+			_ => {},
 		}
 	}
 });
diff --git a/substrate/client/network/src/protocol/notifications/upgrade.rs b/substrate/client/network/src/protocol/notifications/upgrade.rs
index 35ae6917272a271952ab15424b5ad6bd8c4ba336..196b4f44f81f78183c270b77adab864ed119c8be 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade.rs
@@ -16,16 +16,13 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-pub use self::collec::UpgradeCollec;
-pub use self::notifications::{
-	NotificationsIn,
-	NotificationsInOpen,
-	NotificationsInSubstream,
-	NotificationsOut,
-	NotificationsOutOpen,
-	NotificationsOutSubstream,
-	NotificationsHandshakeError,
-	NotificationsOutError,
+pub use self::{
+	collec::UpgradeCollec,
+	notifications::{
+		NotificationsHandshakeError, NotificationsIn, NotificationsInOpen,
+		NotificationsInSubstream, NotificationsOut, NotificationsOutError, NotificationsOutOpen,
+		NotificationsOutSubstream,
+	},
 };

 mod collec;
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs
index 8531fb8bdfdbff880ea25bfbe08d7c0304526f5f..8a2a7f7942025994ae8d09c8ed05ca0bfc1f63fe 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/collec.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/collec.rs
@@ -18,7 +18,12 @@

 use futures::prelude::*;
 use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo};
-use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec};
+use std::{
+	iter::FromIterator,
+	pin::Pin,
+	task::{Context, Poll},
+	vec,
+};

 // TODO: move this to libp2p => https://github.com/libp2p/rust-libp2p/issues/1445

@@ -44,9 +49,10 @@ impl<T: UpgradeInfo> UpgradeInfo for UpgradeCollec<T> {
 	type InfoIter = vec::IntoIter<Self::Info>;

 	fn protocol_info(&self) -> Self::InfoIter {
-		self.0.iter().enumerate()
-			.flat_map(|(n, p)|
-				p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n)))
+		self.0
+			.iter()
+			.enumerate()
+			.flat_map(|(n, p)| p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n)))
 			.collect::<Vec<_>>()
 			.into_iter()
 	}
diff --git a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
index 26bb92d77656b5b547df2302c85a06c4ab8f33a2..d01b1b5054f64862c851b247a2c7c066b4a4380c 100644
--- a/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
+++ b/substrate/client/network/src/protocol/notifications/upgrade/notifications.rs
@@ -16,6 +16,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

+use asynchronous_codec::Framed;
 /// Notifications protocol.
 ///
 /// The Substrate notifications protocol consists in the following:
@@ -34,14 +35,18 @@
 ///
 /// Notification substreams are unidirectional. If A opens a substream with B, then B is
 /// encouraged but not required to open a substream to A as well.
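// ---------------------------------------------------------------------------
// A sketch, not part of the patch, of the handshake framing used by the
// upgrade code below: the handshake message travels with an unsigned-varint
// length prefix, and the reader rejects lengths above `MAX_HANDSHAKE_SIZE`
// before allocating. A hand-rolled LEB128 encoder stands in here for the
// `unsigned_varint` codec that the real implementation uses.

const MAX_HANDSHAKE_SIZE: usize = 1024 * 1024;

/// Encodes `len` as a LEB128 unsigned varint.
fn encode_varint(mut len: usize, out: &mut Vec<u8>) {
	loop {
		let mut byte = (len & 0x7f) as u8;
		len >>= 7;
		if len != 0 {
			byte |= 0x80; // More bytes follow.
		}
		out.push(byte);
		if len == 0 {
			break
		}
	}
}

/// Frames a handshake message: varint length prefix followed by the payload.
fn frame_handshake(message: &[u8]) -> Result<Vec<u8>, &'static str> {
	if message.len() > MAX_HANDSHAKE_SIZE {
		return Err("handshake too large")
	}
	let mut out = Vec::with_capacity(message.len() + 5);
	encode_varint(message.len(), &mut out);
	out.extend_from_slice(message);
	Ok(out)
}

fn main() {
	let framed = frame_handshake(b"hello world").unwrap();
	assert_eq!(framed[0] as usize, b"hello world".len()); // Single-byte varint.
	assert_eq!(&framed[1..], b"hello world");
}
// ---------------------------------------------------------------------------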
-/// - use bytes::BytesMut; use futures::prelude::*; -use asynchronous_codec::Framed; -use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::error; -use std::{borrow::Cow, convert::{Infallible, TryFrom as _}, io, mem, pin::Pin, task::{Context, Poll}, vec}; +use std::{ + borrow::Cow, + convert::{Infallible, TryFrom as _}, + io, mem, + pin::Pin, + task::{Context, Poll}, + vec, +}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. @@ -111,15 +116,12 @@ impl NotificationsIn { pub fn new( main_protocol_name: impl Into>, fallback_names: Vec>, - max_notification_size: u64 + max_notification_size: u64, ) -> Self { let mut protocol_names = fallback_names; protocol_names.insert(0, main_protocol_name.into()); - NotificationsIn { - protocol_names, - max_notification_size, - } + NotificationsIn { protocol_names, max_notification_size } } } @@ -128,29 +130,31 @@ impl UpgradeInfo for NotificationsIn { type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names.iter().cloned().map(StringProtocolName).collect::>().into_iter() + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::>() + .into_iter() } } impl InboundUpgrade for NotificationsIn -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = NotificationsInOpen; type Future = Pin> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_inbound( - self, - mut socket: TSubstream, - negotiated_name: Self::Info, - ) -> Self::Future { + fn upgrade_inbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; if handshake_len > MAX_HANDSHAKE_SIZE { return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: MAX_HANDSHAKE_SIZE, - }); + }) } let mut handshake = vec![0u8; handshake_len]; @@ -191,13 +195,14 @@ pub struct NotificationsInOpen { } impl NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { /// Sends the handshake in order to inform the remote that we accept the substream. pub fn send_handshake(&mut self, message: impl Into>) { if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { error!(target: "sub-libp2p", "Tried to send handshake twice"); - return; + return } self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); @@ -205,7 +210,10 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, /// Equivalent to `Stream::poll_next`, except that it only drives the handshake and is /// guaranteed to not generate any notification. - pub fn poll_process(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + pub fn poll_process( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll> { let mut this = self.project(); loop { @@ -222,7 +230,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); return Poll::Pending - } + }, }, NotificationsInSubstreamHandshake::Flush => match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ @@ -231,7 +239,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; return Poll::Pending - } + }, }, st @ NotificationsInSubstreamHandshake::NotSent | @@ -239,15 +247,16 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | st @ NotificationsInSubstreamHandshake::BothSidesClosed => { *this.handshake = st; - return Poll::Pending; - } + return Poll::Pending + }, } } } } impl Stream for NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { type Item = Result; @@ -273,7 +282,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); return Poll::Pending - } + }, }, NotificationsInSubstreamHandshake::Flush => match Sink::poll_flush(this.socket.as_mut(), cx)? { @@ -282,13 +291,14 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Pending => { *this.handshake = NotificationsInSubstreamHandshake::Flush; return Poll::Pending - } + }, }, NotificationsInSubstreamHandshake::Sent => { match Stream::poll_next(this.socket.as_mut(), cx) { - Poll::Ready(None) => *this.handshake = - NotificationsInSubstreamHandshake::ClosingInResponseToRemote, + Poll::Ready(None) => + *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote, Poll::Ready(Some(msg)) => { *this.handshake = NotificationsInSubstreamHandshake::Sent; return Poll::Ready(Some(msg)) @@ -305,13 +315,13 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, Poll::Pending => { - *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; + *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote; return Poll::Pending - } + }, }, - NotificationsInSubstreamHandshake::BothSidesClosed => - return Poll::Ready(None), + NotificationsInSubstreamHandshake::BothSidesClosed => return Poll::Ready(None), } } } @@ -333,11 +343,7 @@ impl NotificationsOut { let mut protocol_names = fallback_names; protocol_names.insert(0, main_protocol_name.into()); - NotificationsOut { - protocol_names, - initial_message, - max_notification_size, - } + NotificationsOut { protocol_names, initial_message, max_notification_size } } } @@ -356,22 +362,24 @@ impl UpgradeInfo for NotificationsOut { type InfoIter = vec::IntoIter; fn protocol_info(&self) -> Self::InfoIter { - self.protocol_names.iter().cloned().map(StringProtocolName).collect::>().into_iter() + self.protocol_names + .iter() + .cloned() + .map(StringProtocolName) + .collect::>() + .into_iter() } } impl OutboundUpgrade for NotificationsOut -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = NotificationsOutOpen; type Future = Pin> + Send>>; type Error = NotificationsHandshakeError; - fn upgrade_outbound( - self, - mut socket: TSubstream, - negotiated_name: Self::Info, - ) -> Self::Future { + fn upgrade_outbound(self, mut socket: TSubstream, negotiated_name: Self::Info) -> Self::Future { Box::pin(async move { upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; @@ -381,7 +389,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, return Err(NotificationsHandshakeError::TooLarge { requested: handshake_len, max: 
MAX_HANDSHAKE_SIZE, - }); + }) } let mut handshake = vec![0u8; handshake_len]; @@ -399,9 +407,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, } else { Some(negotiated_name.0) }, - substream: NotificationsOutSubstream { - socket: Framed::new(socket, codec), - } + substream: NotificationsOutSubstream { socket: Framed::new(socket, codec) }, }) }) } @@ -419,14 +425,14 @@ pub struct NotificationsOutOpen { } impl Sink> for NotificationsOutSubstream - where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { type Error = NotificationsOutError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = self.project(); - Sink::poll_ready(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_ready(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { @@ -437,14 +443,12 @@ impl Sink> for NotificationsOutSubstream fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = self.project(); - Sink::poll_flush(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_flush(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let mut this = self.project(); - Sink::poll_close(this.socket.as_mut(), cx) - .map_err(NotificationsOutError::Io) + Sink::poll_close(this.socket.as_mut(), cx).map_err(NotificationsOutError::Io) } } @@ -471,11 +475,12 @@ impl From for NotificationsHandshakeError { fn from(err: unsigned_varint::io::ReadError) -> Self { match err { unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), - unsigned_varint::io::ReadError::Decode(err) => NotificationsHandshakeError::VarintDecode(err), + unsigned_varint::io::ReadError::Decode(err) => + NotificationsHandshakeError::VarintDecode(err), _ => { log::warn!("Unrecognized varint decoding error"); NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) - } + }, } } } @@ -492,7 +497,7 @@ mod tests { use super::{NotificationsIn, NotificationsInOpen, NotificationsOut, NotificationsOutOpen}; use async_std::net::{TcpListener, TcpStream}; - use futures::{prelude::*, channel::oneshot}; + use futures::{channel::oneshot, prelude::*}; use libp2p::core::upgrade; use std::borrow::Cow; @@ -506,8 +511,10 @@ mod tests { let NotificationsOutOpen { handshake, mut substream, .. } = upgrade::apply_outbound( socket, NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1 - ).await.unwrap(); + upgrade::Version::V1, + ) + .await + .unwrap(); assert_eq!(handshake, b"hello world"); substream.send(b"test message".to_vec()).await.unwrap(); @@ -520,8 +527,10 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); assert_eq!(handshake, b"initial message"); substream.send_handshake(&b"hello world"[..]); @@ -545,8 +554,10 @@ mod tests { let NotificationsOutOpen { handshake, mut substream, .. 
} = upgrade::apply_outbound( socket, NotificationsOut::new(PROTO_NAME, Vec::new(), vec![], 1024 * 1024), - upgrade::Version::V1 - ).await.unwrap(); + upgrade::Version::V1, + ) + .await + .unwrap(); assert!(handshake.is_empty()); substream.send(Default::default()).await.unwrap(); @@ -559,8 +570,10 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); assert!(handshake.is_empty()); substream.send_handshake(vec![]); @@ -582,8 +595,9 @@ mod tests { let outcome = upgrade::apply_outbound( socket, NotificationsOut::new(PROTO_NAME, Vec::new(), &b"hello"[..], 1024 * 1024), - upgrade::Version::V1 - ).await; + upgrade::Version::V1, + ) + .await; // Despite the protocol negotiation being successfully conducted on the listener // side, we have to receive an error here because the listener didn't send the @@ -598,8 +612,10 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let NotificationsInOpen { handshake, substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); assert_eq!(handshake, b"hello"); @@ -620,9 +636,15 @@ mod tests { let ret = upgrade::apply_outbound( socket, // We check that an initial message that is too large gets refused. - NotificationsOut::new(PROTO_NAME, Vec::new(), (0..32768).map(|_| 0).collect::>(), 1024 * 1024), - upgrade::Version::V1 - ).await; + NotificationsOut::new( + PROTO_NAME, + Vec::new(), + (0..32768).map(|_| 0).collect::>(), + 1024 * 1024, + ), + upgrade::Version::V1, + ) + .await; assert!(ret.is_err()); }); @@ -633,8 +655,9 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let ret = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024) - ).await; + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await; assert!(ret.is_err()); }); @@ -651,8 +674,9 @@ mod tests { let ret = upgrade::apply_outbound( socket, NotificationsOut::new(PROTO_NAME, Vec::new(), &b"initial message"[..], 1024 * 1024), - upgrade::Version::V1 - ).await; + upgrade::Version::V1, + ) + .await; assert!(ret.is_err()); }); @@ -663,8 +687,10 @@ mod tests { let (socket, _) = listener.accept().await.unwrap(); let NotificationsInOpen { handshake, mut substream, .. } = upgrade::apply_inbound( socket, - NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024) - ).await.unwrap(); + NotificationsIn::new(PROTO_NAME, Vec::new(), 1024 * 1024), + ) + .await + .unwrap(); assert_eq!(handshake, b"initial message"); // We check that a handshake that is too large gets refused. diff --git a/substrate/client/network/src/protocol/sync.rs b/substrate/client/network/src/protocol/sync.rs index 55b64c157c65eb4c88398ece39713e1333ffd73d..0ed1bb13256ab4e4cb77f588b902b1ee2996f71e 100644 --- a/substrate/client/network/src/protocol/sync.rs +++ b/substrate/client/network/src/protocol/sync.rs @@ -27,37 +27,41 @@ //! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on //! the network, or whenever a block has been successfully verified, call the appropriate method in //! order to update it. -//! 
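To make the calling convention described in the module documentation above concrete, here is a minimal sketch of driving `ChainSync` by hand. It mirrors the test setup at the bottom of this file's diff: the test client, the `Box::new(DefaultBlockAnnounceValidator)` argument, the trailing `1` (maximum parallel downloads) and the `build_block` helper are all borrowed from those tests, so treat this as an illustration rather than canonical API documentation.

// Sketch only: assumes the same `use super::*` test context as the tests below.
let mut client = Arc::new(TestClientBuilder::new().build());
let mut sync =
    ChainSync::new(SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 1)
        .unwrap();

// Tell sync about a newly connected peer and its advertised best block;
// this may immediately yield an ancestry request for that peer.
let peer_id = PeerId::random();
let block = build_block(&mut client, None, false);
sync.new_peer(peer_id.clone(), block.hash(), *block.header().number()).unwrap();

// From here on, feed network events back in and hand any `Import` results
// to the import queue, e.g.:
//   sync.on_block_data(&peer_id, Some(request), response) -> OnBlockData::Import(..)
//   sync.on_block_justification(peer_id, response)        -> OnBlockJustification::Import { .. }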
-use codec::Encode; -use blocks::BlockCollection; -use state::StateSync; -use sp_blockchain::{Error as ClientError, HeaderMetadata}; -use sp_consensus::{BlockOrigin, BlockStatus, - block_validation::{BlockAnnounceValidator, Validation}, - import_queue::{IncomingBlock, BlockImportResult, BlockImportError} -}; -use crate::protocol::message::{ - self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, +use crate::{ + protocol::message::{self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse}, + schema::v1::{StateRequest, StateResponse}, }; -use crate::schema::v1::{StateResponse, StateRequest}; +use blocks::BlockCollection; +use codec::Encode; use either::Either; use extra_requests::ExtraRequests; +use futures::{stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt}; use libp2p::PeerId; -use log::{debug, trace, warn, info, error}; +use log::{debug, error, info, trace, warn}; +use sp_arithmetic::traits::Saturating; +use sp_blockchain::{Error as ClientError, HeaderMetadata}; +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, Validation}, + import_queue::{BlockImportError, BlockImportResult, IncomingBlock}, + BlockOrigin, BlockStatus, +}; use sp_runtime::{ - EncodedJustification, Justifications, generic::BlockId, traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, CheckedSub, SaturatedConversion, - Hash, HashFor, + Block as BlockT, CheckedSub, Hash, HashFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, }, + EncodedJustification, Justifications, }; -use sp_arithmetic::traits::Saturating; +use state::StateSync; use std::{ - fmt, ops::Range, collections::{HashMap, hash_map::Entry, HashSet}, sync::Arc, pin::Pin, + collections::{hash_map::Entry, HashMap, HashSet}, + fmt, + ops::Range, + pin::Pin, + sync::Arc, }; -use futures::{task::Poll, Future, stream::FuturesUnordered, FutureExt, StreamExt}; mod blocks; mod extra_requests; @@ -126,7 +130,7 @@ mod rep { pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); /// Reputation change when a peer sent us an invalid ancestry result. - pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); + pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); /// Peer response data does not have requested bits. pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); @@ -142,7 +146,7 @@ impl PendingRequests { match self { PendingRequests::Some(set) => { set.insert(id.clone()); - } + }, PendingRequests::All => {}, } } @@ -207,9 +211,8 @@ pub struct ChainSync { /// Total number of downloaded blocks. downloaded_blocks: usize, /// All block announcements that are currently being validated. - block_announce_validation: FuturesUnordered< - Pin> + Send>> - >, + block_announce_validation: + FuturesUnordered> + Send>>>, /// Stats per peer about the number of concurrent block announce validations. block_announce_validation_per_peer_stats: HashMap, /// State sync in progress, if any. @@ -258,7 +261,7 @@ pub struct PeerInfo { /// Their best block hash. pub best_hash: B::Hash, /// Their best block number. - pub best_number: NumberFor + pub best_number: NumberFor, } struct ForkTarget { @@ -276,11 +279,7 @@ pub enum PeerSyncState { /// Available for sync requests. Available, /// Searching for ancestors the Peer has in common with us.
- AncestorSearch { - start: NumberFor, - current: NumberFor, - state: AncestorSearchState, - }, + AncestorSearch { start: NumberFor, current: NumberFor, state: AncestorSearchState }, /// Actively downloading new blocks, starting from the given Number. DownloadingNew(NumberFor), /// Downloading a stale block with given Hash. Stale means that it is a @@ -305,7 +304,7 @@ pub enum SyncState { /// Initial sync is complete, keep-up sync is active. Idle, /// Actively catching up with the chain. - Downloading + Downloading, } /// Reported state download progress. @@ -350,7 +349,7 @@ pub enum OnBlockData { /// The block should be imported. Import(BlockOrigin, Vec>), /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest) + Request(PeerId, BlockRequest), } impl OnBlockData { @@ -371,7 +370,7 @@ pub enum OnStateData { /// The block and state that should be imported. Import(BlockOrigin, IncomingBlock), /// A new state request needs to be made to the given peer. - Request(PeerId, StateRequest) + Request(PeerId, StateRequest), } /// Result of [`ChainSync::poll_block_announce_validation`]. @@ -435,9 +434,7 @@ enum PreValidateBlockAnnounce { /// An error means that *this* node failed to validate it because some internal error happened. /// If the block announcement was invalid, [`Self::Failure`] is the correct variant to express /// this. - Error { - who: PeerId, - }, + Error { who: PeerId }, /// The block announcement should be skipped. /// /// This should *only* be returned when there wasn't a slot registered @@ -451,15 +448,9 @@ pub enum OnBlockJustification { /// The justification needs no further handling. Nothing, /// The justification should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor, - justifications: Justifications - } + Import { peer: PeerId, hash: B::Hash, number: NumberFor, justifications: Justifications }, } - /// Operation mode. #[derive(Debug, PartialEq, Eq)] pub enum SyncMode { @@ -468,10 +459,7 @@ pub enum SyncMode { // Sync headers and block bodies Full, // Sync headers and the last finalized state - LightState { - storage_chain_mode: bool, - skip_proofs: bool, - }, + LightState { storage_chain_mode: bool, skip_proofs: bool }, } /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. @@ -517,12 +505,15 @@ impl ChainSync { fn required_block_attributes(&self) -> BlockAttributes { match self.mode { - SyncMode::Full => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, + SyncMode::Full => + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, SyncMode::LightState { storage_chain_mode: false, .. } => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::LightState { storage_chain_mode: true, .. } => - BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::INDEXED_BODY, + BlockAttributes::HEADER | + BlockAttributes::JUSTIFICATION | + BlockAttributes::INDEXED_BODY, } } @@ -538,24 +529,26 @@ impl ChainSync { /// /// Returns `None` if the peer is unknown. pub fn peer_info(&self, who: &PeerId) -> Option> { - self.peers.get(who).map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) + self.peers + .get(who) + .map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) } /// Returns the current sync status.
pub fn status(&self) -> Status { let best_seen = self.peers.values().map(|p| p.best_number).max(); - let sync_state = - if let Some(n) = best_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. - if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() { - SyncState::Downloading - } else { - SyncState::Idle - } + let sync_state = if let Some(n) = best_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. + if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() + { + SyncState::Downloading } else { SyncState::Idle - }; + } + } else { + SyncState::Idle + }; Status { state: sync_state, @@ -569,7 +562,10 @@ impl ChainSync { /// Number of active forks requests. This includes /// requests that are pending or could be issued right away. pub fn num_sync_requests(&self) -> usize { - self.fork_targets.values().filter(|f| f.number <= self.best_queued_number).count() + self.fork_targets + .values() + .filter(|f| f.number <= self.best_queued_number) + .count() } /// Number of downloaded blocks. @@ -580,23 +576,26 @@ impl ChainSync { /// Handle a new connected peer. /// /// Call this method whenever we connect to a new peer. - pub fn new_peer(&mut self, who: PeerId, best_hash: B::Hash, best_number: NumberFor) - -> Result>, BadPeer> - { + pub fn new_peer( + &mut self, + who: PeerId, + best_hash: B::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer> { // There is nothing sync can get from the node that has no blockchain data. match self.block_status(&best_hash) { Err(e) => { debug!(target:"sync", "Error reading blockchain: {:?}", e); Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - } + }, Ok(BlockStatus::KnownBad) => { info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); Err(BadPeer(who, rep::BAD_BLOCK)) - } + }, Ok(BlockStatus::Unknown) => { if best_number.is_zero() { info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)) } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have // enough to do in the import queue that it's not worth kicking off @@ -608,13 +607,16 @@ impl ChainSync { self.best_queued_hash, self.best_queued_number ); - self.peers.insert(who.clone(), PeerSync { - peer_id: who, - common_number: self.best_queued_number, - best_hash, - best_number, - state: PeerSyncState::Available, - }); + self.peers.insert( + who.clone(), + PeerSync { + peer_id: who, + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); return Ok(None) } @@ -644,38 +646,46 @@ impl ChainSync { start: self.best_queued_number, state: AncestorSearchState::ExponentialBackoff(One::one()), }, - Some(ancestry_request::(common_best)) + Some(ancestry_request::(common_best)), ) }; self.pending_requests.add(&who); - self.peers.insert(who.clone(), PeerSync { - peer_id: who, - common_number: Zero::zero(), - best_hash, - best_number, - state, - }); + self.peers.insert( + who.clone(), + PeerSync { + peer_id: who, + common_number: Zero::zero(), + best_hash, + best_number, + state, + }, + ); Ok(req) - } - Ok(BlockStatus::Queued) | Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { + }, + Ok(BlockStatus::Queued) | + 
Ok(BlockStatus::InChainWithState) | + Ok(BlockStatus::InChainPruned) => { debug!( target: "sync", "New peer with known best hash {} ({}).", best_hash, best_number, ); - self.peers.insert(who.clone(), PeerSync { - peer_id: who.clone(), - common_number: std::cmp::min(self.best_queued_number, best_number), - best_hash, - best_number, - state: PeerSyncState::Available, - }); + self.peers.insert( + who.clone(), + PeerSync { + peer_id: who.clone(), + common_number: std::cmp::min(self.best_queued_number, best_number), + best_hash, + best_number, + state: PeerSyncState::Available, + }, + ); self.pending_requests.add(&who); Ok(None) - } + }, } } @@ -688,9 +698,8 @@ impl ChainSync { /// Schedule a justification request for the given block. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { let client = &self.client; - self.extra_justifications.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) + self.extra_justifications + .schedule((*hash, number), |base, block| is_descendent_of(&**client, base, block)) } /// Clear all pending justification requests. @@ -707,7 +716,9 @@ impl ChainSync { number: NumberFor, ) { if peers.is_empty() { - peers = self.peers.iter() + peers = self + .peers + .iter() // Only request blocks from peers who are ahead or on a par. .filter(|(_, peer)| peer.best_number >= number) .map(|(id, _)| id.clone()) @@ -725,14 +736,14 @@ impl ChainSync { if self.is_known(&hash) { debug!(target: "sync", "Refusing to sync known hash {:?}", hash); - return; + return } trace!(target: "sync", "Downloading requested old fork {:?}", hash); for peer_id in &peers { if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch {..} = peer.state { - continue; + if let PeerSyncState::AncestorSearch { .. } = peer.state { + continue } if number > peer.best_number { @@ -745,22 +756,24 @@ impl ChainSync { self.fork_targets .entry(hash.clone()) - .or_insert_with(|| ForkTarget { - number, - peers: Default::default(), - parent_hash: None, - }) - .peers.extend(peers); + .or_insert_with(|| ForkTarget { number, peers: Default::default(), parent_hash: None }) + .peers + .extend(peers); } /// Get an iterator over all scheduled justification requests. 
- pub fn justification_requests(&mut self) -> impl Iterator)> + '_ { + pub fn justification_requests( + &mut self, + ) -> impl Iterator)> + '_ { let peers = &mut self.peers; let mut matcher = self.extra_justifications.matcher(); std::iter::from_fn(move || { if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) .state = PeerSyncState::DownloadingJustification(request.0); let req = message::generic::BlockRequest { id: 0, @@ -768,7 +781,7 @@ impl ChainSync { from: message::FromBlock::Hash(request.0), to: None, direction: message::Direction::Ascending, - max: Some(1) + max: Some(1), }; Some((peer, req)) } else { @@ -790,7 +803,8 @@ impl ChainSync { let attrs = self.required_block_attributes(); let blocks = &mut self.blocks; let fork_targets = &mut self.fork_targets; - let last_finalized = std::cmp::min(self.best_queued_number, self.client.info().finalized_number); + let last_finalized = + std::cmp::min(self.best_queued_number, self.client.info().finalized_number); let best_queued = self.best_queued_number; let client = &self.client; let queue = &self.queue_blocks; @@ -806,9 +820,10 @@ impl ChainSync { // number is smaller than the last finalized block number, we should do an ancestor // search to find a better common block. If the queue is full we wait till all blocks are // imported though. - if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() - && best_queued < peer.best_number && peer.common_number < last_finalized - && queue.len() <= MAJOR_SYNC_BLOCKS.into() + if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && + best_queued < peer.best_number && + peer.common_number < last_finalized && + queue.len() <= MAJOR_SYNC_BLOCKS.into() { trace!( target: "sync", @@ -843,18 +858,14 @@ impl ChainSync { req, ); Some((id, req)) - } else if let Some((hash, req)) = fork_sync_request( - id, - fork_targets, - best_queued, - last_finalized, - attrs, - |hash| if queue.contains(hash) { - BlockStatus::Queued - } else { - client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown) - }, - ) { + } else if let Some((hash, req)) = + fork_sync_request(id, fork_targets, best_queued, last_finalized, attrs, |hash| { + if queue.contains(hash) { + BlockStatus::Queued + } else { + client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown) + } + }) { trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); peer.state = PeerSyncState::DownloadingStale(hash); Some((id, req)) @@ -869,11 +880,11 @@ impl ChainSync { pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { if let Some(sync) = &self.state_sync { if sync.is_complete() { - return None; + return None } if self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { // Only one pending state request is allowed. 
- return None; + return None } for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.common_number >= sync.target_block_num() { @@ -898,38 +909,42 @@ impl ChainSync { &mut self, who: &PeerId, request: Option>, - response: BlockResponse + response: BlockResponse, ) -> Result, BadPeer> { self.downloaded_blocks += response.blocks.len(); - let new_blocks: Vec> = - if let Some(peer) = self.peers.get_mut(who) { - let mut blocks = response.blocks; - if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { - trace!(target: "sync", "Reversing incoming block list"); - blocks.reverse() - } - self.pending_requests.add(who); - if let Some(request) = request { - match &mut peer.state { - PeerSyncState::DownloadingNew(start_block) => { - self.blocks.clear_peer_download(who); - let start_block = *start_block; - peer.state = PeerSyncState::Available; - validate_blocks::(&blocks, who, Some(request))?; - self.blocks.insert(start_block, blocks, who.clone()); - self.drain_blocks() + let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { + let mut blocks = response.blocks; + if request + .as_ref() + .map_or(false, |r| r.direction == message::Direction::Descending) + { + trace!(target: "sync", "Reversing incoming block list"); + blocks.reverse() + } + self.pending_requests.add(who); + if let Some(request) = request { + match &mut peer.state { + PeerSyncState::DownloadingNew(start_block) => { + self.blocks.clear_peer_download(who); + let start_block = *start_block; + peer.state = PeerSyncState::Available; + validate_blocks::(&blocks, who, Some(request))?; + self.blocks.insert(start_block, blocks, who.clone()); + self.drain_blocks() + }, + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + if blocks.is_empty() { + debug!(target: "sync", "Empty block response from {}", who); + return Err(BadPeer(who.clone(), rep::NO_BLOCK)) } - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - if blocks.is_empty() { - debug!(target: "sync", "Empty block response from {}", who); - return Err(BadPeer(who.clone(), rep::NO_BLOCK)); - } - validate_blocks::(&blocks, who, Some(request))?; - blocks.into_iter().map(|b| { - let justifications = b.justifications.or( - legacy_justification_mapping(b.justification) - ); + validate_blocks::(&blocks, who, Some(request))?; + blocks + .into_iter() + .map(|b| { + let justifications = b + .justifications + .or(legacy_justification_mapping(b.justification)); IncomingBlock { hash: b.hash, header: b.header, @@ -942,110 +957,114 @@ impl ChainSync { skip_execution: self.skip_execution(), state: None, } - }).collect() + }) + .collect() + }, + PeerSyncState::AncestorSearch { current, start, state } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!( + target: "sync", + "Got ancestry block #{} ({}) from peer {}", + current, + block.hash, + who, + ); + maybe_our_block_hash.filter(|x| x == &block.hash) + }, + (None, _) => { + debug!( + target: "sync", + "Invalid response when searching for ancestor from {}", + who, + ); + return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) + }, + (_, Err(e)) => { + info!( + target: "sync", + "❌ Error answering legitimate blockchain query: {:?}", + e, + ); + return Err(BadPeer(who.clone(), rep::BLOCKCHAIN_READ_ERROR)) + }, + }; + if matching_hash.is_some() { + if *start < self.best_queued_number && + self.best_queued_number <= peer.best_number + { + // We've 
made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. + peer.common_number = self.best_queued_number; + } else if peer.common_number < *current { + peer.common_number = *current; + } } - PeerSyncState::AncestorSearch { current, start, state } => { - let matching_hash = match (blocks.get(0), self.client.hash(*current)) { - (Some(block), Ok(maybe_our_block_hash)) => { - trace!( - target: "sync", - "Got ancestry block #{} ({}) from peer {}", - current, - block.hash, - who, - ); - maybe_our_block_hash.filter(|x| x == &block.hash) - }, - (None, _) => { - debug!( - target: "sync", - "Invalid response when searching for ancestor from {}", - who, - ); - return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) - }, - (_, Err(e)) => { - info!( - target: "sync", - "❌ Error answering legitimate blockchain query: {:?}", - e, - ); - return Err(BadPeer(who.clone(), rep::BLOCKCHAIN_READ_ERROR)) - } + if matching_hash.is_none() && current.is_zero() { + trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) + } + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, }; - if matching_hash.is_some() { - if *start < self.best_queued_number && self.best_queued_number <= peer.best_number { - // We've made progress on this chain since the search was started. - // Opportunistically set common number to updated number - // instead of the one that started the search. - peer.common_number = self.best_queued_number; - } - else if peer.common_number < *current { - peer.common_number = *current; - } - } - if matching_hash.is_none() && current.is_zero() { - trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) - } - if let Some((next_state, next_num)) = - handle_ancestor_search_state(state, *current, matching_hash.is_some()) + return Ok(OnBlockData::Request( + who.clone(), + ancestry_request::(next_num), + )) + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown to us and + // add it to sync targets if necessary. + trace!( + target: "sync", + "Ancestry search complete. Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number && + peer.best_number < self.best_queued_number { - peer.state = PeerSyncState::AncestorSearch { - current: next_num, - start: *start, - state: next_state, - }; - return Ok( - OnBlockData::Request(who.clone(), ancestry_request::(next_num)) - ) - } else { - // Ancestry search is complete. Check if peer is on a stale fork unknown to us and - // add it to sync targets if necessary. trace!( target: "sync", - "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", - self.best_queued_hash, - self.best_queued_number, + "Added fork target {} for {}", peer.best_hash, - peer.best_number, - matching_hash, - peer.common_number, + who, ); - if peer.common_number < peer.best_number - && peer.best_number < self.best_queued_number - { - trace!( - target: "sync", - "Added fork target {} for {}", - peer.best_hash, - who, - ); - self.fork_targets - .entry(peer.best_hash.clone()) - .or_insert_with(|| ForkTarget { - number: peer.best_number, - parent_hash: None, - peers: Default::default(), - }) - .peers.insert(who.clone()); - } - peer.state = PeerSyncState::Available; - Vec::new() + self.fork_targets + .entry(peer.best_hash.clone()) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers + .insert(who.clone()); } - }, - PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingState - => Vec::new() - } - } else { - // When request.is_none() this is a block announcement. Just accept blocks. - validate_blocks::(&blocks, who, None)?; - blocks.into_iter().map(|b| { - let justifications = b.justifications.or( - legacy_justification_mapping(b.justification) - ); + peer.state = PeerSyncState::Available; + Vec::new() + } + }, + PeerSyncState::Available | + PeerSyncState::DownloadingJustification(..) | + PeerSyncState::DownloadingState => Vec::new(), + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. + validate_blocks::(&blocks, who, None)?; + blocks + .into_iter() + .map(|b| { + let justifications = + b.justifications.or(legacy_justification_mapping(b.justification)); IncomingBlock { hash: b.hash, header: b.header, @@ -1058,12 +1077,13 @@ impl ChainSync { skip_execution: true, state: None, } - }).collect() - } - } else { - // We don't know of this peer, so we also did not request anything from it. - return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); - }; + }) + .collect() + } + } else { + // We don't know of this peer, so we also did not request anything from it. + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + }; Ok(self.validate_and_queue_blocks(new_blocks)) } @@ -1087,7 +1107,7 @@ impl ChainSync { sync.import(response) } else { debug!(target: "sync", "Ignored obsolete state response from {}", who); - return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) }; match import_result { @@ -1112,14 +1132,13 @@ impl ChainSync { }; debug!(target: "sync", "State sync is complete. Import is queued"); Ok(OnStateData::Import(origin, block)) - } - state::ImportResult::Continue(request) => { - Ok(OnStateData::Request(who.clone(), request)) - } + }, + state::ImportResult::Continue(request) => + Ok(OnStateData::Request(who.clone(), request)), state::ImportResult::BadResponse => { debug!(target: "sync", "Bad state data received from {}", who); Err(BadPeer(who.clone(), rep::BAD_BLOCK)) - } + }, } } @@ -1139,7 +1158,10 @@ impl ChainSync { BlockOrigin::NetworkInitialSync }; - if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { trace!( target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", @@ -1159,16 +1181,17 @@ impl ChainSync { /// /// Returns `Some` if this produces a justification that must be imported /// into the import queue. 
- pub fn on_block_justification - (&mut self, who: PeerId, response: BlockResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); - return Ok(OnBlockJustification::Nothing) - }; + pub fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); + return Ok(OnBlockJustification::Nothing) + }; self.pending_requests.add(&who); if let PeerSyncState::DownloadingJustification(hash) = peer.state { @@ -1181,7 +1204,7 @@ impl ChainSync { target: "sync", "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)) } block.justifications.or(legacy_justification_mapping(block.justification)) @@ -1197,9 +1220,8 @@ impl ChainSync { None }; - if let Some((peer, hash, number, j)) = self - .extra_justifications - .on_response(who, justification) + if let Some((peer, hash, number, j)) = + self.extra_justifications.on_response(who, justification) { return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) } @@ -1230,7 +1252,7 @@ impl ChainSync { } for (result, hash) in results { if has_error { - continue; + continue } if result.is_err() { @@ -1242,7 +1264,7 @@ impl ChainSync { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } - } + }, Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( @@ -1274,7 +1296,8 @@ impl ChainSync { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } - let state_sync_complete = self.state_sync.as_ref().map_or(false, |s| s.target() == hash); + let state_sync_complete = + self.state_sync.as_ref().map_or(false, |s| s.target() == hash); if state_sync_complete { info!( target: "sync", @@ -1286,7 +1309,7 @@ impl ChainSync { output.extend(self.restart()); } }, - Err(BlockImportError::IncompleteHeader(who)) => { + Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { warn!( target: "sync", @@ -1294,9 +1317,8 @@ impl ChainSync { ); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); - } - }, - Err(BlockImportError::VerificationFailed(who, e)) => { + }, + Err(BlockImportError::VerificationFailed(who, e)) => if let Some(peer) = who { warn!( target: "sync", @@ -1307,9 +1329,8 @@ impl ChainSync { ); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); - } - }, - Err(BlockImportError::BadBlock(who)) => { + }, + Err(BlockImportError::BadBlock(who)) => if let Some(peer) = who { warn!( target: "sync", @@ -1318,21 +1339,19 @@ impl ChainSync { peer, ); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - } - }, + }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. // Don't mark it as bad as it still may be synced if explicitly requested. 
trace!(target: "sync", "Obsolete block {:?}", hash); }, - e @ Err(BlockImportError::UnknownParent) | - e @ Err(BlockImportError::Other(_)) => { + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); self.state_sync = None; output.extend(self.restart()); }, - Err(BlockImportError::Cancelled) => {} + Err(BlockImportError::Cancelled) => {}, }; } @@ -1344,7 +1363,8 @@ impl ChainSync { /// with or without errors. pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications.try_finalize_root((hash, number), finalization_result, true); + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); self.pending_requests.set_all(); } @@ -1356,12 +1376,10 @@ impl ChainSync { }); if let SyncMode::LightState { skip_proofs, .. } = &self.mode { - if self.state_sync.is_none() - && !self.peers.is_empty() - && self.queue_blocks.is_empty() - { + if self.state_sync.is_none() && !self.peers.is_empty() && self.queue_blocks.is_empty() { // Finalized a recent block. - let mut heads: Vec<_> = self.peers.iter().map(|(_, peer)| peer.best_number).collect(); + let mut heads: Vec<_> = + self.peers.iter().map(|(_, peer)| peer.best_number).collect(); heads.sort(); let median = heads[heads.len() / 2]; if number + STATE_SYNC_FINALITY_THRESHOLD.saturated_into() >= median { @@ -1372,7 +1390,8 @@ impl ChainSync { number, hash, ); - self.state_sync = Some(StateSync::new(self.client.clone(), header, *skip_proofs)); + self.state_sync = + Some(StateSync::new(self.client.clone(), header, *skip_proofs)); } } } @@ -1400,15 +1419,12 @@ impl ChainSync { self.best_queued_hash = *hash; // Update common blocks for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch {..} = peer.state { + if let PeerSyncState::AncestorSearch { .. } = peer.state { // Wait for ancestry search to complete first. - continue; + continue } - let new_common_number = if peer.best_number >= number { - number - } else { - peer.best_number - }; + let new_common_number = + if peer.best_number >= number { number } else { peer.best_number }; trace!( target: "sync", "Updating peer {} info, ours={}, common={}->{}, their best={}", @@ -1435,7 +1451,10 @@ impl ChainSync { /// /// It is *required* to call [`Self::peer_block_announce_validation_finished`] when the /// validation is finished to clear the slot. 
- fn has_slot_for_block_announce_validation(&mut self, peer: &PeerId) -> HasSlotForBlockAnnounceValidation { + fn has_slot_for_block_announce_validation( + &mut self, + peer: &PeerId, + ) -> HasSlotForBlockAnnounceValidation { if self.block_announce_validation.len() >= MAX_CONCURRENT_BLOCK_ANNOUNCE_VALIDATIONS { return HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached } @@ -1478,15 +1497,18 @@ impl ChainSync { ); if number.is_zero() { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored genesis block (#0) announcement from {}: {}", - who, - hash, - ); - PreValidateBlockAnnounce::Skip - }.boxed()); + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored genesis block (#0) announcement from {}: {}", + who, + hash, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); return } @@ -1494,18 +1516,21 @@ impl ChainSync { match self.has_slot_for_block_announce_validation(&who) { HasSlotForBlockAnnounceValidation::Yes => {}, HasSlotForBlockAnnounceValidation::TotalMaximumSlotsReached => { - self.block_announce_validation.push(async move { - warn!( - target: "sync", - "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", - number, - hash, - who, - ); - PreValidateBlockAnnounce::Skip - }.boxed()); + self.block_announce_validation.push( + async move { + warn!( + target: "sync", + "💔 Ignored block (#{} -- {}) announcement from {} because all validation slots are occupied.", + number, + hash, + who, + ); + PreValidateBlockAnnounce::Skip + } + .boxed(), + ); return - } + }, HasSlotForBlockAnnounceValidation::MaximumPeerSlotsReached => { self.block_announce_validation.push(async move { warn!( @@ -1518,7 +1543,7 @@ impl ChainSync { PreValidateBlockAnnounce::Skip }.boxed()); return - } + }, } // Let external validator check the block announcement. @@ -1526,33 +1551,36 @@ impl ChainSync { let future = self.block_announce_validator.validate(&header, assoc_data); let hash = hash.clone(); - self.block_announce_validation.push(async move { - match future.await { - Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { - is_new_best: is_new_best || is_best, - announce, - who, - }, - Ok(Validation::Failure { disconnect }) => { - debug!( - target: "sync", - "Block announcement validation of block {:?} from {} failed", - hash, + self.block_announce_validation.push( + async move { + match future.await { + Ok(Validation::Success { is_new_best }) => PreValidateBlockAnnounce::Process { + is_new_best: is_new_best || is_best, + announce, who, - ); - PreValidateBlockAnnounce::Failure { who, disconnect } - } - Err(e) => { - debug!( - target: "sync", - "💔 Block announcement validation of block {:?} errored: {}", - hash, - e, - ); - PreValidateBlockAnnounce::Error { who } + }, + Ok(Validation::Failure { disconnect }) => { + debug!( + target: "sync", + "Block announcement validation of block {:?} from {} failed", + hash, + who, + ); + PreValidateBlockAnnounce::Failure { who, disconnect } + }, + Err(e) => { + debug!( + target: "sync", + "💔 Block announcement validation of block {:?} errored: {}", + hash, + e, + ); + PreValidateBlockAnnounce::Error { who } + }, } } - }.boxed()); + .boxed(), + ); } /// Poll block announce validation. 
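The hunks above only reshuffle the slot bookkeeping, so the push/poll contract around announce validation is easy to miss. Below is a hedged sketch of that cycle, patterned on the `send_block_announce` test helper later in this diff; `sync`, `peer_id`, `header` and `announce` are placeholders, and the `Poll`-based return shape of `poll_block_announce_validation` is inferred from the names in this file rather than spelled out by the diff.

use futures::{executor::block_on, future::poll_fn, task::Poll};

// Queue the announcement for validation...
sync.push_block_announce_validation(peer_id.clone(), header.hash(), announce, true);

// ...then drive the validation futures until nothing more is ready.
block_on(poll_fn(|cx| loop {
    match sync.poll_block_announce_validation(cx) {
        // One finished validation, e.g. `PollBlockAnnounceValidation::Nothing { .. }`.
        Poll::Ready(_result) => {},
        // Nothing else is ready right now; stop driving the loop.
        Poll::Pending => return Poll::Ready(()),
    }
}));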
@@ -1603,7 +1631,7 @@ impl ChainSync { if *entry.get() == 0 { entry.remove(); } - } + }, } } @@ -1622,9 +1650,8 @@ impl ChainSync { ); return PollBlockAnnounceValidation::Failure { who, disconnect } }, - PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { - (announce, is_new_best, who) - }, + PreValidateBlockAnnounce::Process { announce, is_new_best, who } => + (announce, is_new_best, who), PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => { debug!( target: "sync", @@ -1644,7 +1671,8 @@ impl ChainSync { let number = *announce.header.number(); let hash = announce.header.hash(); - let parent_status = self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); + let parent_status = + self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); let known_parent = parent_status != BlockStatus::Unknown; let ancient_parent = parent_status == BlockStatus::InChainPruned; @@ -1662,7 +1690,7 @@ impl ChainSync { peer.best_hash = hash; } - if let PeerSyncState::AncestorSearch {..} = peer.state { + if let PeerSyncState::AncestorSearch { .. } = peer.state { trace!(target: "sync", "Peer state is ancestor search."); return PollBlockAnnounceValidation::Nothing { is_best, who, announce } } @@ -1672,8 +1700,8 @@ impl ChainSync { if is_best { if known && self.best_queued_number >= number { peer.update_common_number(number); - } else if announce.header.parent_hash() == &self.best_queued_hash - || known_parent && self.best_queued_number >= number + } else if announce.header.parent_hash() == &self.best_queued_hash || + known_parent && self.best_queued_number >= number { peer.update_common_number(number - One::one()); } @@ -1727,7 +1755,8 @@ impl ChainSync { parent_hash: Some(*announce.header.parent_hash()), peers: Default::default(), }) - .peers.insert(who.clone()); + .peers + .insert(who.clone()); } PollBlockAnnounceValidation::Nothing { is_best, who, announce } @@ -1775,9 +1804,9 @@ impl ChainSync { // We make sure our common number is at least something we have. p.common_number = self.best_queued_number; self.peers.insert(id, p); - return None; - } - _ => {} + return None + }, + _ => {}, } // handle peers that were in other states. @@ -1792,7 +1821,7 @@ impl ChainSync { /// Find a block to start sync from. If we sync with state, that's the latest block we have state for. fn reset_sync_start_point(&mut self) -> Result<(), ClientError> { let info = self.client.info(); - if matches!(self.mode, SyncMode::LightState {..}) && info.finalized_state.is_some() { + if matches!(self.mode, SyncMode::LightState { .. }) && info.finalized_state.is_some() { log::warn!( target: "sync", "Can't use fast sync mode with a partially synced database. Reverting to full sync mode." @@ -1803,7 +1832,9 @@ impl ChainSync { self.best_queued_hash = info.best_hash; self.best_queued_number = info.best_number; if self.mode == SyncMode::Full { - if self.client.block_status(&BlockId::hash(info.best_hash))? != BlockStatus::InChainWithState { + if self.client.block_status(&BlockId::hash(info.best_hash))? != + BlockStatus::InChainWithState + { self.import_existing = true; // Latest state is missing, start with the last finalized state or genesis instead. if let Some((hash, number)) = info.finalized_state { @@ -1836,7 +1867,9 @@ impl ChainSync { /// Is any peer downloading the given hash?
fn is_already_downloading(&self, hash: &B::Hash) -> bool { - self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } /// Return some key metrics. @@ -1846,7 +1879,7 @@ impl ChainSync { queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), justifications: self.extra_justifications.metrics(), - _priv: () + _priv: (), } } @@ -1856,9 +1889,10 @@ impl ChainSync { .drain(self.best_queued_number + One::one()) .into_iter() .map(|block_data| { - let justifications = block_data.block.justifications.or( - legacy_justification_mapping(block_data.block.justification) - ); + let justifications = block_data + .block + .justifications + .or(legacy_justification_mapping(block_data.block.justification)); IncomingBlock { hash: block_data.block.hash, header: block_data.block.header, @@ -1871,16 +1905,18 @@ impl ChainSync { skip_execution: self.skip_execution(), state: None, } - }).collect() + }) + .collect() } - } // This is purely during a backwards compatible transitionary period and should be removed // once we can assume all nodes can send and receive multiple Justifications // The ID tag is hardcoded here to avoid depending on the GRANDPA crate. // See: https://github.com/paritytech/substrate/issues/8172 -fn legacy_justification_mapping(justification: Option) -> Option { +fn legacy_justification_mapping( + justification: Option, +) -> Option { justification.map(|just| (*b"FRNK", just).into()) } @@ -1889,7 +1925,7 @@ pub(crate) struct Metrics { pub(crate) queued_blocks: u32, pub(crate) fork_targets: u32, pub(crate) justifications: extra_requests::Metrics, - _priv: () + _priv: (), } /// Request the ancestry for a block. Sends a request for header and justification for the given @@ -1901,7 +1937,7 @@ fn ancestry_request(block: NumberFor) -> BlockRequest { from: message::FromBlock::Number(block), to: None, direction: message::Direction::Ascending, - max: Some(1) + max: Some(1), } } @@ -1935,7 +1971,7 @@ fn handle_ancestor_search_state( let next_distance_to_tip = *next_distance_to_tip; if block_hash_match && next_distance_to_tip == One::one() { // We found the ancestor in the first step so there is no need to execute binary search. 
- return None; + return None } if block_hash_match { let left = curr_block_num; @@ -1943,15 +1979,18 @@ fn handle_ancestor_search_state( let middle = left + (right - left) / two; Some((AncestorSearchState::BinarySearch(left, right), middle)) } else { - let next_block_num = curr_block_num.checked_sub(&next_distance_to_tip) - .unwrap_or_else(Zero::zero); + let next_block_num = + curr_block_num.checked_sub(&next_distance_to_tip).unwrap_or_else(Zero::zero); let next_distance_to_tip = next_distance_to_tip * two; - Some((AncestorSearchState::ExponentialBackoff(next_distance_to_tip), next_block_num)) + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) } - } + }, AncestorSearchState::BinarySearch(mut left, mut right) => { if left >= curr_block_num { - return None; + return None } if block_hash_match { left = curr_block_num; @@ -1961,7 +2000,7 @@ fn handle_ancestor_search_state( assert!(right >= left); let middle = left + (right - left) / two; Some((AncestorSearchState::BinarySearch(left, right), middle)) - } + }, } } @@ -1977,7 +2016,7 @@ fn peer_block_request( ) -> Option<(Range>, BlockRequest)> { if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. - return None; + return None } else if peer.common_number < finalized { trace!( target: "sync", @@ -2009,7 +2048,7 @@ fn peer_block_request( from, to: None, direction: message::Direction::Descending, - max: Some((range.end - range.start).saturated_into::()) + max: Some((range.end - range.start).saturated_into::()), }; Some((range, request)) @@ -2027,11 +2066,11 @@ fn fork_sync_request( targets.retain(|hash, r| { if r.number <= finalized { trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); - return false; + return false } if check_block(hash) != BlockStatus::Unknown { trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); - return false; + return false } true }); @@ -2048,27 +2087,34 @@ fn fork_sync_request( 1 }; trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); - return Some((hash.clone(), message::generic::BlockRequest { - id: 0, - fields: attributes.clone(), - from: message::FromBlock::Hash(hash.clone()), - to: None, - direction: message::Direction::Descending, - max: Some(count), - })) + return Some(( + hash.clone(), + message::generic::BlockRequest { + id: 0, + fields: attributes.clone(), + from: message::FromBlock::Hash(hash.clone()), + to: None, + direction: message::Direction::Descending, + max: Some(count), + }, + )) } } None } /// Returns `true` if the given `block` is a descendent of `base`. 
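Stripped of the `ChainSync` plumbing, the `handle_ancestor_search_state` reformatting above preserves a two-phase strategy: walk backwards from our best block with doubling step sizes until a block the peer also has is found, then binary-search the remaining gap for the highest shared block. A self-contained illustration over plain `u64` block numbers follows; `have_block` is a hypothetical oracle for "does the peer share this block?", and a shared genesis is assumed (the real code treats a mismatch at block 0 as a genesis-mismatch error).

fn find_common_ancestor(our_best: u64, have_block: impl Fn(u64) -> bool) -> u64 {
    // Phase 1: exponential backoff, probing at doubling distances until a hit.
    let (mut current, mut distance) = (our_best, 1u64);
    while !have_block(current) {
        current = current.saturating_sub(distance);
        distance *= 2;
    }
    let step = distance / 2;
    if step == 0 {
        return current // the very first probe already matched
    }
    // Phase 2: binary search the gap for the highest shared block; sharing is
    // monotone along a chain, so this is well-defined.
    let (mut left, mut right) = (current, current + step - 1);
    while left < right {
        let middle = left + (right - left + 1) / 2;
        if have_block(middle) {
            left = middle
        } else {
            right = middle - 1
        }
    }
    left
}

// E.g. if we are at block 100 and the peer shares everything up to block 42:
// assert_eq!(find_common_ancestor(100, |n| n <= 42), 42);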
-fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Hash) -> sp_blockchain::Result - where - Block: BlockT, - T: HeaderMetadata + ?Sized, +fn is_descendent_of( + client: &T, + base: &Block::Hash, + block: &Block::Hash, +) -> sp_blockchain::Result +where + Block: BlockT, + T: HeaderMetadata + ?Sized, { if base == block { - return Ok(false); + return Ok(false) } let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; @@ -2101,13 +2147,13 @@ fn validate_blocks( blocks.last() } else { blocks.first() - }.and_then(|b| b.header.as_ref()); + } + .and_then(|b| b.header.as_ref()); - let expected_block = block_header.as_ref() - .map_or(false, |h| match request.from { - message::FromBlock::Hash(hash) => h.hash() == hash, - message::FromBlock::Number(n) => h.number() == &n, - }); + let expected_block = block_header.as_ref().map_or(false, |h| match request.from { + message::FromBlock::Hash(hash) => h.hash() == hash, + message::FromBlock::Number(n) => h.number() == &n, + }); if !expected_block { debug!( @@ -2120,8 +2166,8 @@ fn validate_blocks( return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) } - if request.fields.contains(message::BlockAttributes::HEADER) - && blocks.iter().any(|b| b.header.is_none()) + if request.fields.contains(message::BlockAttributes::HEADER) && + blocks.iter().any(|b| b.header.is_none()) { trace!( target: "sync", @@ -2132,8 +2178,8 @@ fn validate_blocks( return Err(BadPeer(who.clone(), rep::BAD_RESPONSE)) } - if request.fields.contains(message::BlockAttributes::BODY) - && blocks.iter().any(|b| b.body.is_none()) + if request.fields.contains(message::BlockAttributes::BODY) && + blocks.iter().any(|b| b.body.is_none()) { trace!( target: "sync", @@ -2161,7 +2207,8 @@ fn validate_blocks( } if let (Some(header), Some(body)) = (&b.header, &b.body) { let expected = *header.extrinsics_root(); - let got = HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); + let got = + HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); if expected != got { debug!( target:"sync", @@ -2181,17 +2228,19 @@ fn validate_blocks( #[cfg(test)] mod test { - use super::message::{FromBlock, BlockState, BlockData}; - use super::*; + use super::{ + message::{BlockData, BlockState, FromBlock}, + *, + }; + use futures::{executor::block_on, future::poll_fn}; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use substrate_test_runtime_client::{ runtime::{Block, Hash, Header}, - ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, - BlockBuilderExt, TestClient, ClientExt, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClient, + TestClientBuilder, TestClientBuilderExt, }; - use futures::{future::poll_fn, executor::block_on}; #[test] fn processes_empty_response_on_justification_request_for_unknown_block() { @@ -2203,12 +2252,8 @@ mod test { let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); let peer_id = PeerId::random(); - let mut sync = ChainSync::new( - SyncMode::Full, - client.clone(), - block_announce_validator, - 1, - ).unwrap(); + let mut sync = + ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1).unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -2222,50 +2267,36 @@ mod test { sync.request_justification(&a1_hash, a1_number); // the justification request 
should be scheduled to that peer - assert!( - sync.justification_requests().any(|(who, request)| { - who == peer_id && request.from == FromBlock::Hash(a1_hash) - }) - ); + assert!(sync + .justification_requests() + .any(|(who, request)| { who == peer_id && request.from == FromBlock::Hash(a1_hash) })); // there are no extra pending requests - assert_eq!( - sync.extra_justifications.pending_requests().count(), - 0, - ); + assert_eq!(sync.extra_justifications.pending_requests().count(), 0,); // there's one in-flight extra request to the expected peer - assert!( - sync.extra_justifications.active_requests().any(|(who, (hash, number))| { - *who == peer_id && *hash == a1_hash && *number == a1_number - }) - ); + assert!(sync.extra_justifications.active_requests().any(|(who, (hash, number))| { + *who == peer_id && *hash == a1_hash && *number == a1_number + })); // if the peer replies with an empty response (i.e. it doesn't know the block), // the active request should be cleared. assert_eq!( sync.on_block_justification( peer_id.clone(), - BlockResponse:: { - id: 0, - blocks: vec![], - } + BlockResponse:: { id: 0, blocks: vec![] } ), Ok(OnBlockJustification::Nothing), ); // there should be no in-flight requests - assert_eq!( - sync.extra_justifications.active_requests().count(), - 0, - ); + assert_eq!(sync.extra_justifications.active_requests().count(), 0,); // and the request should now be pending again, waiting for reschedule - assert!( - sync.extra_justifications.pending_requests().any(|(hash, number)| { - *hash == a1_hash && *number == a1_number - }) - ); + assert!(sync + .extra_justifications + .pending_requests() + .any(|(hash, number)| { *hash == a1_hash && *number == a1_number })); } #[test] @@ -2276,7 +2307,8 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, - ).unwrap(); + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); @@ -2311,10 +2343,10 @@ mod test { // the justification request should be scheduled to the // new peer which is at the given block assert!(sync.justification_requests().any(|(p, r)| { - p == peer_id3 - && r.fields == BlockAttributes::JUSTIFICATION - && r.from == message::FromBlock::Hash(b1_hash) - && r.to == None + p == peer_id3 && + r.fields == BlockAttributes::JUSTIFICATION && + r.from == message::FromBlock::Hash(b1_hash) && + r.to == None })); assert_eq!( @@ -2326,7 +2358,9 @@ mod test { let block_requests = sync.restart(); // which should make us send out block requests to the first two peers - assert!(block_requests.map(|r| r.unwrap()).all(|(p, _)| { p == peer_id1 || p == peer_id2 })); + assert!(block_requests + .map(|r| r.unwrap()) + .all(|(p, _)| { p == peer_id1 || p == peer_id2 })); // peer 3 should be unaffected as it was downloading finality data assert_eq!( @@ -2337,30 +2371,18 @@ mod test { // Set common block to something that we don't have (e.g. failed import) sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; let _ = sync.restart().count(); - assert_eq!( - sync.peers.get(&peer_id3).unwrap().common_number, - 50 - ); + assert_eq!(sync.peers.get(&peer_id3).unwrap().common_number, 50); } /// Send a block announcement for the given `header`.
- fn send_block_announce( - header: Header, - peer_id: &PeerId, - sync: &mut ChainSync, - ) { + fn send_block_announce(header: Header, peer_id: &PeerId, sync: &mut ChainSync) { let block_announce = BlockAnnounce { header: header.clone(), state: Some(BlockState::Best), data: Some(Vec::new()), }; - sync.push_block_announce_validation( - peer_id.clone(), - header.hash(), - block_announce, - true, - ); + sync.push_block_announce_validation(peer_id.clone(), header.hash(), block_announce, true); // Poll until we have processed the block announcement block_on(poll_fn(|cx| loop { @@ -2374,8 +2396,9 @@ mod test { fn create_block_response(blocks: Vec) -> BlockResponse { BlockResponse:: { id: 0, - blocks: blocks.into_iter().map(|b| - BlockData:: { + blocks: blocks + .into_iter() + .map(|b| BlockData:: { hash: b.hash(), header: Some(b.header().clone()), body: Some(b.deconstruct().1), @@ -2384,8 +2407,8 @@ mod test { message_queue: None, justification: None, justifications: None, - } - ).collect(), + }) + .collect(), } } @@ -2414,11 +2437,8 @@ mod test { fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { let at = at.unwrap_or_else(|| client.info().best_hash); - let mut block_builder = client.new_block_at( - &BlockId::Hash(at), - Default::default(), - false, - ).unwrap(); + let mut block_builder = + client.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); if fork { block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); @@ -2450,15 +2470,16 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, - ).unwrap(); + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); let mut client2 = client.clone(); let mut build_block_at = |at, import| { - let mut block_builder = client2.new_block_at(&BlockId::Hash(at), Default::default(), false) - .unwrap(); + let mut block_builder = + client2.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); // Make sure we generate a different block as fork block_builder.push_storage_change(vec![1, 2, 3], Some(vec![4, 5, 6])).unwrap(); @@ -2517,13 +2538,11 @@ mod test { let response = create_block_response(vec![block2.clone()]); let res = sync.on_block_data(&peer_id1, Some(request2), response).unwrap(); - assert!( - matches!( - res, - OnBlockData::Import(_, blocks) - if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) - ) - ); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) + if blocks.iter().all(|b| [2, 3, 4].contains(b.header.as_ref().unwrap().number())) + )); let response = create_block_response(vec![block2.clone()]); let res = sync.on_block_data(&peer_id2, Some(request3), response).unwrap(); @@ -2552,7 +2571,9 @@ mod test { let blocks = { let mut client = Arc::new(TestClientBuilder::new().build()); - (0..MAX_DOWNLOAD_AHEAD * 2).map(|_| build_block(&mut client, None, false)).collect::>() + (0..MAX_DOWNLOAD_AHEAD * 2) + .map(|_| build_block(&mut client, None, false)) + .collect::>() }; let mut client = Arc::new(TestClientBuilder::new().build()); @@ -2563,14 +2584,16 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, - ).unwrap(); + ) + .unwrap(); let peer_id1 = PeerId::random(); let peer_id2 = PeerId::random(); let best_block = blocks.last().unwrap().clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()).unwrap(); + sync.new_peer(peer_id1.clone(), best_block.hash(), *best_block.header().number()) + .unwrap();
sync.new_peer(peer_id2.clone(), info.best_hash, 0).unwrap(); let mut best_block_num = 0; @@ -2590,18 +2613,17 @@ mod test { let response = create_block_response(resp_blocks.clone()); let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert!( - matches!( - res, - OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST - ), - ); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); best_block_num += MAX_BLOCKS_TO_REQUEST as u32; - resp_blocks.into_iter() - .rev() - .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); + resp_blocks + .into_iter() + .rev() + .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); } // "Wait" for the queue to clear @@ -2627,12 +2649,10 @@ mod test { let response = create_block_response(vec![blocks[(best_block_num - 1) as usize].clone()]); let res = sync.on_block_data(&peer_id2, Some(peer2_req), response).unwrap(); - assert!( - matches!( - res, - OnBlockData::Import(_, blocks) if blocks.is_empty() - ), - ); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.is_empty() + ),); let peer1_from = unwrap_from_block_number(peer1_req.unwrap().from); @@ -2671,10 +2691,13 @@ mod test { .cloned() .collect::>(); - fork_blocks.into_iter().chain( + fork_blocks + .into_iter() + .chain( (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)) - ).collect::>() + .map(|_| build_block(&mut client, None, true)), + ) + .collect::>() }; let info = client.info(); @@ -2684,27 +2707,27 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, - ).unwrap(); + ) + .unwrap(); let finalized_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2 - 1].clone(); let just = (*b"TEST", Vec::new()); - client.finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)).unwrap(); + client + .finalize_block(BlockId::Hash(finalized_block.hash()), Some(just)) + .unwrap(); sync.update_chain_info(&info.best_hash, info.best_number); let peer_id1 = PeerId::random(); let common_block = blocks[MAX_BLOCKS_TO_LOOK_BACKWARDS as usize / 2].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()).unwrap(); + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + .unwrap(); send_block_announce(fork_blocks.last().unwrap().header().clone(), &peer_id1, &mut sync); - let mut request = get_block_request( - &mut sync, - FromBlock::Number(info.best_number), - 1, - &peer_id1, - ); + let mut request = + get_block_request(&mut sync, FromBlock::Number(info.best_number), 1, &peer_id1); // Do the ancestor search loop { @@ -2739,36 +2762,34 @@ mod test { let response = create_block_response(resp_blocks.clone()); let res = sync.on_block_data(&peer_id1, Some(request), response).unwrap(); - assert!( - matches!( - res, - OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST - ), - ); + assert!(matches!( + res, + OnBlockData::Import(_, blocks) if blocks.len() == MAX_BLOCKS_TO_REQUEST + ),); best_block_num += MAX_BLOCKS_TO_REQUEST as u32; let _ = sync.on_blocks_processed( MAX_BLOCKS_TO_REQUEST as usize, MAX_BLOCKS_TO_REQUEST as usize, - resp_blocks.iter() + resp_blocks + .iter() .rev() - .map(|b| + .map(|b| { ( - Ok( - BlockImportResult::ImportedUnknown( - b.header().number().clone(), - Default::default(), - Some(peer_id1.clone()), - ) - ), + Ok(BlockImportResult::ImportedUnknown( + 
b.header().number().clone(), + Default::default(), + Some(peer_id1.clone()), + )), b.hash(), ) - ) - .collect() + }) + .collect(), ); - resp_blocks.into_iter() + resp_blocks + .into_iter() .rev() .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); } @@ -2786,21 +2807,21 @@ mod test { fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..3) - .map(|_| build_block(&mut client, None, false)) - .collect::>(); + let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); let mut sync = ChainSync::new( SyncMode::Full, client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, - ).unwrap(); + ) + .unwrap(); let peer_id1 = PeerId::random(); let common_block = blocks[1].clone(); // Connect the node we will sync from - sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()).unwrap(); + sync.new_peer(peer_id1.clone(), common_block.hash(), *common_block.header().number()) + .unwrap(); // Create a "new" header and announce it let mut header = blocks[0].header().clone(); diff --git a/substrate/client/network/src/protocol/sync/blocks.rs b/substrate/client/network/src/protocol/sync/blocks.rs index 01b5f6016f8a59a4b72483a20f5f5b4f071c12ef..e93d0174b82814674f22fb05433ecd3eadfec101 100644 --- a/substrate/client/network/src/protocol/sync/blocks.rs +++ b/substrate/client/network/src/protocol/sync/blocks.rs @@ -16,13 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::cmp; -use std::ops::Range; -use std::collections::{HashMap, BTreeMap}; -use log::trace; +use crate::protocol::message; use libp2p::PeerId; +use log::trace; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; -use crate::protocol::message; +use std::{ + cmp, + collections::{BTreeMap, HashMap}, + ops::Range, +}; /// Block data with origin. #[derive(Debug, Clone, PartialEq, Eq)] @@ -35,10 +37,7 @@ pub struct BlockData { #[derive(Debug)] enum BlockRangeState { - Downloading { - len: NumberFor, - downloading: u32, - }, + Downloading { len: NumberFor, downloading: u32 }, Complete(Vec>), } @@ -62,10 +61,7 @@ pub struct BlockCollection { impl BlockCollection { /// Create a new instance. pub fn new() -> Self { - BlockCollection { - blocks: BTreeMap::new(), - peer_requests: HashMap::new(), - } + BlockCollection { blocks: BTreeMap::new(), peer_requests: HashMap::new() } } /// Clear everything. @@ -77,7 +73,7 @@ impl BlockCollection { /// Insert a set of blocks into collection. pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { if blocks.is_empty() { - return; + return } match self.blocks.get(&start) { @@ -86,13 +82,20 @@ impl BlockCollection { }, Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return; + return }, _ => (), } - self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter() - .map(|b| BlockData { origin: Some(who.clone()), block: b }).collect())); + self.blocks.insert( + start, + BlockRangeState::Complete( + blocks + .into_iter() + .map(|b| BlockData { origin: Some(who.clone()), block: b }) + .collect(), + ), + ); } /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. 
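Two of the settings assumed above show up throughout the `blocks.rs` hunks here: `trailing_semicolon = false` turns `return None;` into `return None`, and `spaces_around_ranges = false` tightens `1 .. 41` into `1..41`. A toy function (names invented) formatted the same way:

```rust
// Matches the style in the surrounding hunks: no semicolon after `return`,
// no spaces around the `..` range operator.
fn first_free(taken: &[u32], limit: u32) -> Option<u32> {
	for n in 0..limit {
		if !taken.contains(&n) {
			return Some(n)
		}
	}
	None
}

fn main() {
	assert_eq!(first_free(&[0, 1, 3], 5), Some(2));
}
```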
@@ -107,7 +110,7 @@ impl BlockCollection { ) -> Option>> { if peer_best <= common { // Bail out early - return None; + return None } // First block number that we need to download let first_different = common + >::one(); @@ -120,15 +123,13 @@ impl BlockCollection { break match (prev, next) { (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) if downloading < max_parallel => - (*start .. *start + *len, downloading), + (*start..*start + *len, downloading), (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => - (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap - (Some((start, r)), None) => - (*start + r.len() .. *start + r.len() + count, 0), // last range - (None, None) => - (first_different .. first_different + count, 0), // empty + (*start + r.len()..cmp::min(*next_start, *start + r.len() + count), 0), // gap + (Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), /* last range */ + (None, None) => (first_different..first_different + count, 0), /* empty */ (None, Some((start, _))) if *start > first_different => - (first_different .. cmp::min(first_different + count, *start), 0), // gap at the start + (first_different..cmp::min(first_different + count, *start), 0), /* gap at the start */ _ => { prev = next; continue @@ -139,23 +140,33 @@ impl BlockCollection { // crop to peers best if range.start > peer_best { trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); - return None; + return None } range.end = cmp::min(peer_best + One::one(), range.end); - if self.blocks.iter().next().map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { + if self + .blocks + .iter() + .next() + .map_or(false, |(n, _)| range.start > *n + max_ahead.into()) + { trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); - return None; + return None } self.peer_requests.insert(who, range.start); - self.blocks.insert(range.start, BlockRangeState::Downloading { - len: range.end - range.start, - downloading: downloading + 1 - }); + self.blocks.insert( + range.start, + BlockRangeState::Downloading { + len: range.end - range.start, + downloading: downloading + 1, + }, + ); if range.end <= range.start { - panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", - range, count, peer_best, common, self.blocks); + panic!( + "Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", + range, count, peer_best, common, self.blocks + ); } Some(range) } @@ -188,16 +199,14 @@ impl BlockCollection { pub fn clear_peer_download(&mut self, who: &PeerId) { if let Some(start) = self.peer_requests.remove(who) { let remove = match self.blocks.get_mut(&start) { - Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => { + Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) + if *downloading > 1 => + { *downloading -= 1; false - }, - Some(&mut BlockRangeState::Downloading { .. }) => { - true - }, - _ => { - false } + Some(&mut BlockRangeState::Downloading { .. 
}) => true, + _ => false, }; if remove { self.blocks.remove(&start); @@ -210,27 +219,28 @@ impl BlockCollection { mod test { use super::{BlockCollection, BlockData, BlockRangeState}; use crate::{protocol::message, PeerId}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; use sp_core::H256; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { - bc.blocks.is_empty() && - bc.peer_requests.is_empty() + bc.blocks.is_empty() && bc.peer_requests.is_empty() } fn generate_blocks(n: usize) -> Vec> { - (0 .. n).map(|_| message::generic::BlockData { - hash: H256::random(), - header: None, - body: None, - indexed_body: None, - message_queue: None, - receipt: None, - justification: None, - justifications: None, - }).collect() + (0..n) + .map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + indexed_body: None, + message_queue: None, + receipt: None, + justification: None, + justifications: None, + }) + .collect() } #[test] @@ -252,32 +262,47 @@ mod test { let peer2 = PeerId::random(); let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1 .. 41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41 .. 81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81 .. 121)); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1..41)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41..81)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81..121)); bc.clear_peer_download(&peer1); bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121 .. 151)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121..151)); bc.clear_peer_download(&peer0); bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11 .. 41)); - assert_eq!(bc.drain(1), blocks[1..11].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11..41)); + assert_eq!( + bc.drain(1), + blocks[1..11] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .collect::>() + ); bc.clear_peer_download(&peer0); bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); let drained = bc.drain(12); - assert_eq!(drained[..30], blocks[11..41].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()[..]); - assert_eq!(drained[30..], blocks[41..81].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + assert_eq!( + drained[..30], + blocks[11..41] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }) + .collect::>()[..] + ); + assert_eq!( + drained[30..], + blocks[41..81] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .collect::>()[..] + ); bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81 .. 
121)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81..121)); bc.clear_peer_download(&peer2); bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); bc.clear_peer_download(&peer1); @@ -285,25 +310,38 @@ mod test { assert_eq!(bc.drain(80), vec![]); let drained = bc.drain(81); - assert_eq!(drained[..40], blocks[81..121].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }).collect::>()[..]); - assert_eq!(drained[40..], blocks[121..150].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + assert_eq!( + drained[..40], + blocks[81..121] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }) + .collect::>()[..] + ); + assert_eq!( + drained[40..], + blocks[121..150] + .iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }) + .collect::>()[..] + ); } #[test] fn large_gap() { let mut bc: BlockCollection = BlockCollection::new(); - bc.blocks.insert(100, BlockRangeState::Downloading { - len: 128, - downloading: 1, - }); - let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect(); + bc.blocks.insert(100, BlockRangeState::Downloading { len: 128, downloading: 1 }); + let blocks = generate_blocks(10) + .into_iter() + .map(|b| BlockData { block: b, origin: None }) + .collect(); bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1 .. 100)); + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1..100)); assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), Some(100 + 128 .. 100 + 128 + 128)); + assert_eq!( + bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), + Some(100 + 128..100 + 128 + 128) + ); } } diff --git a/substrate/client/network/src/protocol/sync/extra_requests.rs b/substrate/client/network/src/protocol/sync/extra_requests.rs index 3de79b3f48734d97b3b5e9ce0492c07dc7cf4fcb..52419b5d7702c95f2776b248e41c82901ecacdae 100644 --- a/substrate/client/network/src/protocol/sync/extra_requests.rs +++ b/substrate/client/network/src/protocol/sync/extra_requests.rs @@ -16,14 +16,16 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_blockchain::Error as ClientError; use crate::protocol::sync::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; +use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::time::Duration; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + time::Duration, +}; use wasm_timer::Instant; // Time to wait before trying to get the same extra data from the same peer. @@ -61,7 +63,7 @@ pub(crate) struct Metrics { pub(crate) active_requests: u32, pub(crate) importing_requests: u32, pub(crate) failed_requests: u32, - _priv: () + _priv: (), } impl ExtraRequests { @@ -93,13 +95,14 @@ impl ExtraRequests { /// Queue an extra data request to be considered by the `Matcher`. 
pub(crate) fn schedule(&mut self, request: ExtraRequest, is_descendent_of: F) - where F: Fn(&B::Hash, &B::Hash) -> Result + where + F: Fn(&B::Hash, &B::Hash) -> Result, { match self.tree.import(request.0, request.1, (), &is_descendent_of) { Ok(true) => { // this is a new root so we add it to the current `pending_requests` self.pending_requests.push_back((request.0, request.1)); - } + }, Err(fork_tree::Error::Revert) => { // we have finalized further than the given request, presumably // by some other part of the system (not sync). we can safely @@ -107,8 +110,8 @@ impl ExtraRequests { }, Err(err) => { debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); - } - _ => () + }, + _ => (), } } @@ -120,7 +123,11 @@ impl ExtraRequests { } /// Processes the response for the request previously sent to the given peer. - pub(crate) fn on_response(&mut self, who: PeerId, resp: Option) -> Option<(PeerId, B::Hash, NumberFor, R)> { + pub(crate) fn on_response( + &mut self, + who: PeerId, + resp: Option, + ) -> Option<(PeerId, B::Hash, NumberFor, R)> { // we assume that the request maps to the given response, this is // currently enforced by the outer network protocol before passing on // messages to chain sync. @@ -157,9 +164,10 @@ impl ExtraRequests { &mut self, best_finalized_hash: &B::Hash, best_finalized_number: NumberFor, - is_descendent_of: F + is_descendent_of: F, ) -> Result<(), fork_tree::Error> - where F: Fn(&B::Hash, &B::Hash) -> Result + where + F: Fn(&B::Hash, &B::Hash) -> Result, { let request = (*best_finalized_hash, best_finalized_number); @@ -203,9 +211,8 @@ impl ExtraRequests { &mut self, request: ExtraRequest, result: Result, E>, - reschedule_on_failure: bool - ) -> bool - { + reschedule_on_failure: bool, + ) -> bool { if !self.importing_requests.remove(&request) { return false } @@ -217,7 +224,7 @@ impl ExtraRequests { self.pending_requests.push_front(request); } return true - } + }, }; if self.tree.finalize_root(&finalized_hash).is_none() { @@ -258,7 +265,7 @@ impl ExtraRequests { active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), - _priv: () + _priv: (), } } } @@ -269,15 +276,12 @@ pub(crate) struct Matcher<'a, B: BlockT> { /// Length of pending requests collection. /// Used to ensure we do not loop more than once over all pending requests. remaining: usize, - extras: &'a mut ExtraRequests + extras: &'a mut ExtraRequests, } impl<'a, B: BlockT> Matcher<'a, B> { fn new(extras: &'a mut ExtraRequests) -> Self { - Matcher { - remaining: extras.pending_requests.len(), - extras - } + Matcher { remaining: extras.pending_requests.len(), extras } } /// Finds a peer to which a pending request can be sent. @@ -294,7 +298,10 @@ impl<'a, B: BlockT> Matcher<'a, B> { /// /// The returned `PeerId` (if any) is guaranteed to come from the given `peers` /// argument. 
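The `schedule` hunk above shows the match-arm conventions at work: block-bodied arms now end in `},` and the catch-all becomes `_ => (),`. A minimal illustration with an invented enum:

```rust
enum Outcome {
	New,
	Known,
	Error(String),
}

fn describe(outcome: Outcome) -> &'static str {
	match outcome {
		Outcome::New => "new root",
		Outcome::Known => {
			// Block-bodied arm: with `match_block_trailing_comma` it ends in `},`.
			"already scheduled"
		},
		Outcome::Error(_) => "failed",
	}
}

fn main() {
	assert_eq!(describe(Outcome::Known), "already scheduled");
}
```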
- pub(crate) fn next(&mut self, peers: &HashMap>) -> Option<(PeerId, ExtraRequest)> { + pub(crate) fn next( + &mut self, + peers: &HashMap>, + ) -> Option<(PeerId, ExtraRequest)> { if self.remaining == 0 { return None } @@ -305,7 +312,9 @@ impl<'a, B: BlockT> Matcher<'a, B> { } while let Some(request) = self.extras.pending_requests.pop_front() { - for (peer, sync) in peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) { + for (peer, sync) in + peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) + { // only ask peers that have synced at least up to the block number that we're asking the extra for if sync.best_number < request.1 { continue @@ -315,7 +324,13 @@ impl<'a, B: BlockT> Matcher<'a, B> { continue } // only ask if the same request has not failed for this peer before - if self.extras.failed_requests.get(&request).map(|rr| rr.iter().any(|i| &i.0 == peer)).unwrap_or(false) { + if self + .extras + .failed_requests + .get(&request) + .map(|rr| rr.iter().any(|i| &i.0 == peer)) + .unwrap_or(false) + { continue } self.extras.active_requests.insert(peer.clone(), request); @@ -343,22 +358,22 @@ impl<'a, B: BlockT> Matcher<'a, B> { #[cfg(test)] mod tests { + use super::*; use crate::protocol::sync::PeerSync; - use sp_blockchain::Error as ClientError; use quickcheck::{Arbitrary, Gen, QuickCheck}; - use std::collections::{HashMap, HashSet}; - use super::*; + use sp_blockchain::Error as ClientError; use sp_test_primitives::{Block, BlockNumber, Hash}; + use std::collections::{HashMap, HashSet}; #[test] fn requests_are_processed_in_order() { fn property(mut peers: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } @@ -368,12 +383,12 @@ mod tests { for p in &pending { let (peer, r) = m.next(&peers.0).unwrap(); assert_eq!(p, &r); - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } } - QuickCheck::new() - .quickcheck(property as fn(ArbitraryPeers)) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers)) } #[test] @@ -398,22 +413,24 @@ mod tests { fn property(mut peers: ArbitraryPeers) -> bool { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. 
num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } assert!(requests.pending_requests.is_empty()); let active_peers = requests.active_requests.keys().cloned().collect::>(); - let previously_active = requests.active_requests.values().cloned().collect::>(); + let previously_active = + requests.active_requests.values().cloned().collect::>(); for peer in &active_peers { requests.peer_disconnected(peer) @@ -424,8 +441,7 @@ mod tests { previously_active == requests.pending_requests.iter().cloned().collect::>() } - QuickCheck::new() - .quickcheck(property as fn(ArbitraryPeers) -> bool) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers) -> bool) } #[test] @@ -433,31 +449,44 @@ mod tests { fn property(mut peers: ArbitraryPeers) { let mut requests = ExtraRequests::::new("test"); - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); + let num_peers_available = + peers.0.values().filter(|s| s.state == PeerSyncState::Available).count(); - for i in 0 .. num_peers_available { + for i in 0..num_peers_available { requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) } let mut m = requests.matcher(); while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); } - let active = requests.active_requests.iter().map(|(p, &r)| (p.clone(), r)).collect::>(); + let active = requests + .active_requests + .iter() + .map(|(p, &r)| (p.clone(), r)) + .collect::>(); for (peer, req) in &active { assert!(requests.failed_requests.get(req).is_none()); assert!(!requests.pending_requests.contains(req)); assert!(requests.on_response::<()>(peer.clone(), None).is_none()); assert!(requests.pending_requests.contains(req)); - assert_eq!(1, requests.failed_requests.get(req).unwrap().iter().filter(|(p, _)| p == peer).count()) + assert_eq!( + 1, + requests + .failed_requests + .get(req) + .unwrap() + .iter() + .filter(|(p, _)| p == peer) + .count() + ) } } - QuickCheck::new() - .quickcheck(property as fn(ArbitraryPeers)) + QuickCheck::new().quickcheck(property as fn(ArbitraryPeers)) } #[test] @@ -497,7 +526,10 @@ mod tests { finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true); // ensure that there's no request for #6 - assert_eq!(finality_proofs.pending_requests.iter().collect::>(), Vec::<&(Hash, u64)>::new()); + assert_eq!( + finality_proofs.pending_requests.iter().collect::>(), + Vec::<&(Hash, u64)>::new() + ); } #[test] @@ -560,7 +592,7 @@ mod tests { impl Arbitrary for ArbitraryPeers { fn arbitrary(g: &mut Gen) -> Self { let mut peers = HashMap::with_capacity(g.size()); - for _ in 0 .. 
g.size() { + for _ in 0..g.size() { let ps = ArbitraryPeerSync::arbitrary(g).0; peers.insert(ps.peer_id.clone(), ps); } diff --git a/substrate/client/network/src/protocol/sync/state.rs b/substrate/client/network/src/protocol/sync/state.rs index fc9dfdbb8c376e5ed89d8f76ca10629a10454d30..73e4eac1f5bc07388f3b3dc5289897b26c10e125 100644 --- a/substrate/client/network/src/protocol/sync/state.rs +++ b/substrate/client/network/src/protocol/sync/state.rs @@ -16,13 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::sync::Arc; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; -use sc_client_api::StorageProof; -use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; -use crate::chain::{Client, ImportedState}; use super::StateDownloadProgress; +use crate::{ + chain::{Client, ImportedState}, + schema::v1::{StateEntry, StateRequest, StateResponse}, +}; +use codec::{Decode, Encode}; +use sc_client_api::StorageProof; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::sync::Arc; /// State sync support. @@ -73,14 +75,14 @@ impl StateSync { target: "sync", "Bad state response", ); - return ImportResult::BadResponse; + return ImportResult::BadResponse } if !self.skip_proof && response.proof.is_empty() { log::debug!( target: "sync", "Missing proof", ); - return ImportResult::BadResponse; + return ImportResult::BadResponse } let complete = if !self.skip_proof { log::debug!( @@ -93,24 +95,21 @@ impl StateSync { Ok(proof) => proof, Err(e) => { log::debug!(target: "sync", "Error decoding proof: {:?}", e); - return ImportResult::BadResponse; - } - }; - let (values, complete) = match self.client.verify_range_proof( - self.target_root, - proof, - &self.last_key - ) { - Err(e) => { - log::debug!( - target: "sync", - "StateResponse failed proof verification: {:?}", - e, - ); - return ImportResult::BadResponse; + return ImportResult::BadResponse }, - Ok(values) => values, }; + let (values, complete) = + match self.client.verify_range_proof(self.target_root, proof, &self.last_key) { + Err(e) => { + log::debug!( + target: "sync", + "StateResponse failed proof verification: {:?}", + e, + ); + return ImportResult::BadResponse + }, + Ok(values) => values, + }; log::debug!(target: "sync", "Imported with {} keys", values.len()); if let Some(last) = values.last().map(|(k, _)| k) { @@ -120,7 +119,7 @@ impl StateSync { for (key, value) in values { self.imported_bytes += key.len() as u64; self.state.push((key, value)) - }; + } self.imported_bytes += proof_size; complete } else { @@ -142,10 +141,14 @@ impl StateSync { }; if complete { self.complete = true; - ImportResult::Import(self.target_block.clone(), self.target_header.clone(), ImportedState { - block: self.target_block.clone(), - state: std::mem::take(&mut self.state) - }) + ImportResult::Import( + self.target_block.clone(), + self.target_header.clone(), + ImportedState { + block: self.target_block.clone(), + state: std::mem::take(&mut self.state), + }, + ) } else { ImportResult::Continue(self.next_request()) } @@ -178,10 +181,6 @@ impl StateSync { /// Returns state sync estimated progress. 
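The header rewrites above (`state.rs`, and `extra_requests.rs` before it) are the `imports_granularity = "Crate"` setting assumed earlier: one alphabetically ordered `use` item per crate, with nested groups replacing repeated paths. In isolation the shape is:

```rust
// Illustrative only; the grouped-per-crate shape is the point.
#![allow(unused_imports)]

use std::{
	collections::{BTreeMap, HashMap},
	sync::Arc,
	time::Duration,
};

fn main() {}
```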
pub fn progress(&self) -> StateDownloadProgress { let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256; - StateDownloadProgress { - percentage: percent_done, - size: self.imported_bytes, - } + StateDownloadProgress { percentage: percent_done, size: self.imported_bytes } } } - diff --git a/substrate/client/network/src/request_responses.rs b/substrate/client/network/src/request_responses.rs index 20469e143d41ec36091bded5dbdc6e7015494b0f..226e1c546d6c971f86595a7b8128758cb42680c1 100644 --- a/substrate/client/network/src/request_responses.rs +++ b/substrate/client/network/src/request_responses.rs @@ -33,17 +33,20 @@ //! //! - If provided, a ["requests processing"](ProtocolConfig::inbound_queue) channel //! is used to handle incoming requests. -//! -use futures::{channel::{mpsc, oneshot}, prelude::*}; +use crate::ReputationChange; +use futures::{ + channel::{mpsc, oneshot}, + prelude::*, +}; use libp2p::{ core::{ connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, }, request_response::{ - RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent, - RequestResponseMessage, ResponseChannel, ProtocolSupport + ProtocolSupport, RequestResponse, RequestResponseCodec, RequestResponseConfig, + RequestResponseEvent, RequestResponseMessage, ResponseChannel, }, swarm::{ protocols_handler::multi::MultiHandler, NetworkBehaviour, NetworkBehaviourAction, @@ -51,58 +54,62 @@ use libp2p::{ }, }; use std::{ - borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter, - pin::Pin, task::{Context, Poll}, time::Duration, + borrow::Cow, + collections::{hash_map::Entry, HashMap}, + convert::TryFrom as _, + io, iter, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; use wasm_timer::Instant; -use crate::ReputationChange; pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; /// Configuration for a single request-response protocol. #[derive(Debug, Clone)] pub struct ProtocolConfig { - /// Name of the protocol on the wire. Should be something like `/foo/bar`. - pub name: Cow<'static, str>, - - /// Maximum allowed size, in bytes, of a request. - /// - /// Any request larger than this value will be declined as a way to avoid allocating too - /// much memory for it. - pub max_request_size: u64, - - /// Maximum allowed size, in bytes, of a response. - /// - /// Any response larger than this value will be declined as a way to avoid allocating too - /// much memory for it. - pub max_response_size: u64, - - /// Duration after which emitted requests are considered timed out. - /// - /// If you expect the response to come back quickly, you should set this to a smaller duration. - pub request_timeout: Duration, - - /// Channel on which the networking service will send incoming requests. - /// - /// Every time a peer sends a request to the local node using this protocol, the networking - /// service will push an element on this channel. The receiving side of this channel then has - /// to pull this element, process the request, and send back the response to send back to the - /// peer. - /// - /// The size of the channel has to be carefully chosen. If the channel is full, the networking - /// service will discard the incoming request send back an error to the peer. Consequently, - /// the channel being full is an indicator that the node is overloaded. 
- /// - /// You can typically set the size of the channel to `T / d`, where `T` is the - /// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to - /// build a response. - /// - /// Can be `None` if the local node does not support answering incoming requests. - /// If this is `None`, then the local node will not advertise support for this protocol towards - /// other peers. If this is `Some` but the channel is closed, then the local node will - /// advertise support for this protocol, but any incoming request will lead to an error being - /// sent back. - pub inbound_queue: Option>, + /// Name of the protocol on the wire. Should be something like `/foo/bar`. + pub name: Cow<'static, str>, + + /// Maximum allowed size, in bytes, of a request. + /// + /// Any request larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_request_size: u64, + + /// Maximum allowed size, in bytes, of a response. + /// + /// Any response larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_response_size: u64, + + /// Duration after which emitted requests are considered timed out. + /// + /// If you expect the response to come back quickly, you should set this to a smaller duration. + pub request_timeout: Duration, + + /// Channel on which the networking service will send incoming requests. + /// + /// Every time a peer sends a request to the local node using this protocol, the networking + /// service will push an element on this channel. The receiving side of this channel then has + /// to pull this element, process the request, and send back the response to send back to the + /// peer. + /// + /// The size of the channel has to be carefully chosen. If the channel is full, the networking + /// service will discard the incoming request send back an error to the peer. Consequently, + /// the channel being full is an indicator that the node is overloaded. + /// + /// You can typically set the size of the channel to `T / d`, where `T` is the + /// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to + /// build a response. + /// + /// Can be `None` if the local node does not support answering incoming requests. + /// If this is `None`, then the local node will not advertise support for this protocol towards + /// other peers. If this is `Some` but the channel is closed, then the local node will + /// advertise support for this protocol, but any incoming request will lead to an error being + /// sent back. + pub inbound_queue: Option>, } /// A single request received by a peer on a request-response protocol. @@ -179,14 +186,11 @@ pub enum Event { /// Duration the request took. duration: Duration, /// Result of the request. - result: Result<(), RequestFailure> + result: Result<(), RequestFailure>, }, /// A request protocol handler issued reputation changes for the given peer. - ReputationChanges { - peer: PeerId, - changes: Vec, - } + ReputationChanges { peer: PeerId, changes: Vec }, } /// Combination of a protocol name and a request id. @@ -234,19 +238,17 @@ pub struct RequestResponsesBehaviour { /// "response builder" used to build responses for incoming requests. protocols: HashMap< Cow<'static, str>, - (RequestResponse, Option>) + (RequestResponse, Option>), >, /// Pending requests, passed down to a [`RequestResponse`] behaviour, awaiting a reply. 
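The re-indented `inbound_queue` docs above carry a real sizing rule, not just formatting: capacity ≈ `T / d`, where `T` is the `request_timeout` and `d` the average time spent building a response. A worked instance with invented numbers:

```rust
use std::time::Duration;

fn main() {
	let request_timeout = Duration::from_secs(30); // T, as in the docs above
	let avg_response_time = Duration::from_millis(50); // d, assumed for the example
	let capacity = (request_timeout.as_millis() / avg_response_time.as_millis()) as usize;
	assert_eq!(capacity, 600);
	println!("inbound_queue capacity ≈ {}", capacity);
}
```

Sized this way, a full channel signals genuine overload rather than a transient burst, which is exactly how the surrounding code treats it.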
- pending_requests: HashMap< - ProtocolRequestId, - (Instant, oneshot::Sender, RequestFailure>>), - >, + pending_requests: + HashMap, RequestFailure>>)>, /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the /// start time and the response to send back to the remote. pending_responses: stream::FuturesUnordered< - Pin> + Send>> + Pin> + Send>>, >, /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. @@ -282,15 +284,18 @@ impl RequestResponsesBehaviour { ProtocolSupport::Outbound }; - let rq_rp = RequestResponse::new(GenericCodec { - max_request_size: protocol.max_request_size, - max_response_size: protocol.max_response_size, - }, iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), cfg); + let rq_rp = RequestResponse::new( + GenericCodec { + max_request_size: protocol.max_request_size, + max_response_size: protocol.max_response_size, + }, + iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), + cfg, + ); match protocols.entry(protocol.name) { Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), - Entry::Occupied(e) => - return Err(RegisterError::DuplicateProtocol(e.key().clone())), + Entry::Occupied(e) => return Err(RegisterError::DuplicateProtocol(e.key().clone())), }; } @@ -348,19 +353,20 @@ impl RequestResponsesBehaviour { } impl NetworkBehaviour for RequestResponsesBehaviour { - type ProtocolsHandler = MultiHandler< - String, - as NetworkBehaviour>::ProtocolsHandler, - >; + type ProtocolsHandler = + MultiHandler as NetworkBehaviour>::ProtocolsHandler>; type OutEvent = Event; fn new_handler(&mut self) -> Self::ProtocolsHandler { - let iter = self.protocols.iter_mut() + let iter = self + .protocols + .iter_mut() .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))); - MultiHandler::try_from_iter(iter) - .expect("Protocols are in a HashMap and there can be at most one handler per \ - protocol name, which is the only possible error; qed") + MultiHandler::try_from_iter(iter).expect( + "Protocols are in a HashMap and there can be at most one handler per \ + protocol name, which is the only possible error; qed", + ) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { @@ -384,7 +390,12 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint) } @@ -400,7 +411,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { &mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, - error: &dyn std::error::Error + error: &dyn std::error::Error, ) { for (p, _) in self.protocols.values_mut() { NetworkBehaviour::inject_addr_reach_failure(p, peer_id, addr, error) @@ -488,11 +499,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, protocol: protocol_name, inner_channel, - response: OutgoingResponse { - result, - reputation_changes, - sent_feedback, - }, + response: OutgoingResponse { result, reputation_changes, sent_feedback }, } = match outcome { Some(outcome) => outcome, // The response builder was too busy or handling the request failed. 
This is @@ -514,10 +521,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour { ); } else { if let Some(sent_feedback) = sent_feedback { - self.send_feedback.insert( - (protocol_name, request_id).into(), - sent_feedback - ); + self.send_feedback + .insert((protocol_name, request_id).into(), sent_feedback); } } } @@ -525,11 +530,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour { if !reputation_changes.is_empty() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent( - Event::ReputationChanges{ - peer, - changes: reputation_changes, - }, - )); + Event::ReputationChanges { peer, changes: reputation_changes }, + )) } } @@ -543,38 +545,35 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // Other events generated by the underlying behaviour are transparently // passed through. NetworkBehaviourAction::DialAddress { address } => { - log::error!("The request-response isn't supposed to start dialing peers"); + log::error!( + "The request-response isn't supposed to start dialing peers" + ); return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) - } - NetworkBehaviourAction::DialPeer { peer_id, condition } => { + }, + NetworkBehaviourAction::DialPeer { peer_id, condition } => return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition, - }) - } - NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - } => { + }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event: ((*protocol).to_string(), event), - }) - } - NetworkBehaviourAction::ReportObservedAddr { address, score } => { + }), + NetworkBehaviourAction::ReportObservedAddr { address, score } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { - address, score, - }) - } + address, + score, + }), }; match ev { // Received a request from a remote. RequestResponseEvent::Message { peer, - message: RequestResponseMessage::Request { request_id, request, channel, .. }, + message: + RequestResponseMessage::Request { request_id, request, channel, .. }, } => { self.pending_responses_arrival_time.insert( (protocol.clone(), request_id.clone()).into(), @@ -605,7 +604,11 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // `InboundFailure::Omission` event. if let Ok(response) = rx.await { Some(RequestProcessingOutcome { - peer, request_id, protocol, inner_channel: channel, response + peer, + request_id, + protocol, + inner_channel: channel, + response, }) } else { None @@ -614,27 +617,25 @@ impl NetworkBehaviour for RequestResponsesBehaviour { // This `continue` makes sure that `pending_responses` gets polled // after we have added the new element. - continue 'poll_all; - } + continue 'poll_all + }, // Received a response from a remote to one of our requests. RequestResponseEvent::Message { peer, - message: RequestResponseMessage::Response { - request_id, - response, - }, + message: RequestResponseMessage::Response { request_id, response }, .. 
} => { - let (started, delivered) = match self.pending_requests.remove( - &(protocol.clone(), request_id).into(), - ) { + let (started, delivered) = match self + .pending_requests + .remove(&(protocol.clone(), request_id).into()) + { Some((started, pending_response)) => { - let delivered = pending_response.send( - response.map_err(|()| RequestFailure::Refused), - ).map_err(|_| RequestFailure::Obsolete); + let delivered = pending_response + .send(response.map_err(|()| RequestFailure::Refused)) + .map_err(|_| RequestFailure::Obsolete); (started, delivered) - } + }, None => { log::warn!( target: "sub-libp2p", @@ -642,8 +643,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue; - } + continue + }, }; let out = Event::RequestFinished { @@ -653,21 +654,22 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: delivered, }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, // One of our requests has failed. RequestResponseEvent::OutboundFailure { - peer, - request_id, - error, - .. + peer, request_id, error, .. } => { - let started = match self.pending_requests.remove(&(protocol.clone(), request_id).into()) { + let started = match self + .pending_requests + .remove(&(protocol.clone(), request_id).into()) + { Some((started, pending_response)) => { - if pending_response.send( - Err(RequestFailure::Network(error.clone())), - ).is_err() { + if pending_response + .send(Err(RequestFailure::Network(error.clone()))) + .is_err() + { log::debug!( target: "sub-libp2p", "Request with id {:?} failed. At the same time local \ @@ -676,7 +678,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { ); } started - } + }, None => { log::warn!( target: "sub-libp2p", @@ -684,8 +686,8 @@ impl NetworkBehaviour for RequestResponsesBehaviour { request_id, ); debug_assert!(false); - continue; - } + continue + }, }; let out = Event::RequestFinished { @@ -695,29 +697,30 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Err(RequestFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, // An inbound request failed, either while reading the request or due to failing // to send a response. - RequestResponseEvent::InboundFailure { request_id, peer, error, .. } => { - self.pending_responses_arrival_time.remove( - &(protocol.clone(), request_id).into(), - ); + RequestResponseEvent::InboundFailure { + request_id, peer, error, .. + } => { + self.pending_responses_arrival_time + .remove(&(protocol.clone(), request_id).into()); self.send_feedback.remove(&(protocol.clone(), request_id).into()); let out = Event::InboundRequest { peer, protocol: protocol.clone(), result: Err(ResponseFailure::Network(error)), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, // A response to an inbound request has been sent. 
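The `pending_requests` lookups above illustrate the chain-wrapping rule assumed earlier: once a method chain overflows the width limit, every link drops to its own line with a leading `.`. The same layout on a toy chain:

```rust
fn main() {
	// Wrapped one link per line, as in the hunks above, because the
	// single-line form would exceed the column limit.
	let longest = ["a", "bbb", "cc"]
		.iter()
		.filter(|s| !s.is_empty())
		.max_by_key(|s| s.len())
		.copied();
	assert_eq!(longest, Some("bbb"));
}
```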
RequestResponseEvent::ResponseSent { request_id, peer } => { - let arrival_time = self.pending_responses_arrival_time.remove( - &(protocol.clone(), request_id).into(), - ) + let arrival_time = self + .pending_responses_arrival_time + .remove(&(protocol.clone(), request_id).into()) .map(|t| t.elapsed()) .expect( "Time is added for each inbound request on arrival and only \ @@ -727,9 +730,9 @@ impl NetworkBehaviour for RequestResponsesBehaviour { failed; qed.", ); - if let Some(send_feedback) = self.send_feedback.remove( - &(protocol.clone(), request_id).into() - ) { + if let Some(send_feedback) = + self.send_feedback.remove(&(protocol.clone(), request_id).into()) + { let _ = send_feedback.send(()); } @@ -739,14 +742,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { result: Ok(arrival_time), }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); - - } + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)) + }, }; } } - break Poll::Pending; + break Poll::Pending } } } @@ -786,7 +788,7 @@ pub enum ResponseFailure { /// Implements the libp2p [`RequestResponseCodec`] trait. Defines how streams of bytes are turned /// into requests and responses and vice-versa. #[derive(Debug, Clone)] -#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. +#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. pub struct GenericCodec { max_request_size: u64, max_response_size: u64, @@ -807,13 +809,14 @@ impl RequestResponseCodec for GenericCodec { T: AsyncRead + Unpin + Send, { // Read the length. - let length = unsigned_varint::aio::read_usize(&mut io).await + let length = unsigned_varint::aio::read_usize(&mut io) + .await .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; if length > usize::try_from(self.max_request_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("Request size exceeds limit: {} > {}", length, self.max_request_size) - )); + format!("Request size exceeds limit: {} > {}", length, self.max_request_size), + )) } // Read the payload. @@ -840,17 +843,15 @@ impl RequestResponseCodec for GenericCodec { Ok(l) => l, Err(unsigned_varint::io::ReadError::Io(err)) if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => - { - return Ok(Err(())); - } + return Ok(Err(())), Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), }; if length > usize::try_from(self.max_response_size).unwrap_or(usize::MAX) { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("Response size exceeds limit: {} > {}", length, self.max_response_size) - )); + format!("Response size exceeds limit: {} > {}", length, self.max_response_size), + )) } // Read the payload. 
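Beyond formatting, the `GenericCodec` hunks above encode a simple framing contract: read a varint length prefix, reject it if it exceeds the configured maximum, then read that many payload bytes. Below is a synchronous, buffer-based sketch of the same logic; the originals use `unsigned_varint::aio` over async I/O, and the helper here with its names is invented for illustration.

```rust
use std::io;

fn read_frame(buf: &[u8], max_len: usize) -> io::Result<&[u8]> {
	// Length prefix: an unsigned varint, as in the codec above.
	let (len, rest) = unsigned_varint::decode::usize(buf)
		.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
	if len > max_len {
		return Err(io::Error::new(
			io::ErrorKind::InvalidInput,
			format!("Frame size exceeds limit: {} > {}", len, max_len),
		))
	}
	// Payload: exactly `len` bytes must follow.
	rest.get(..len)
		.ok_or_else(|| io::Error::new(io::ErrorKind::UnexpectedEof, "truncated frame"))
}
```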
@@ -913,23 +914,30 @@ impl RequestResponseCodec for GenericCodec { mod tests { use super::*; - use futures::channel::{mpsc, oneshot}; - use futures::executor::LocalPool; - use futures::task::Spawn; - use libp2p::identity::Keypair; - use libp2p::Multiaddr; - use libp2p::core::upgrade; - use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::noise; - use libp2p::swarm::{Swarm, SwarmEvent}; + use futures::{ + channel::{mpsc, oneshot}, + executor::LocalPool, + task::Spawn, + }; + use libp2p::{ + core::{ + transport::{MemoryTransport, Transport}, + upgrade, + }, + identity::Keypair, + noise, + swarm::{Swarm, SwarmEvent}, + Multiaddr, + }; use std::{iter, time::Duration}; - fn build_swarm(list: impl Iterator) -> (Swarm, Multiaddr) { + fn build_swarm( + list: impl Iterator, + ) -> (Swarm, Multiaddr) { let keypair = Keypair::generate_ed25519(); - let noise_keys = noise::Keypair::::new() - .into_authentic(&keypair) - .unwrap(); + let noise_keys = + noise::Keypair::::new().into_authentic(&keypair).unwrap(); let transport = MemoryTransport .upgrade(upgrade::Version::V1) @@ -956,18 +964,24 @@ mod tests { .map(|_| { let (tx, mut rx) = mpsc::channel::(64); - pool.spawner().spawn_obj(async move { - while let Some(rq) = rx.next().await { - let (fb_tx, fb_rx) = oneshot::channel(); - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(super::OutgoingResponse { - result: Ok(b"this is a response".to_vec()), - reputation_changes: Vec::new(), - sent_feedback: Some(fb_tx), - }); - fb_rx.await.unwrap(); - } - }.boxed().into()).unwrap(); + pool.spawner() + .spawn_obj( + async move { + while let Some(rq) = rx.next().await { + let (fb_tx, fb_rx) = oneshot::channel(); + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: Some(fb_tx), + }); + fb_rx.await.unwrap(); + } + } + .boxed() + .into(), + ) + .unwrap(); let protocol_config = ProtocolConfig { name: From::from(protocol_name), @@ -989,19 +1003,23 @@ mod tests { } // Running `swarm[0]` in the background. - pool.spawner().spawn_obj({ - let (mut swarm, _) = swarms.remove(0); - async move { - loop { - match swarm.next_event().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - result.unwrap(); - }, - _ => {} + pool.spawner() + .spawn_obj({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); + }, + _ => {}, + } } } - }.boxed().into() - }).unwrap(); + .boxed() + .into() + }) + .unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); @@ -1021,14 +1039,12 @@ mod tests { ); assert!(response_receiver.is_none()); response_receiver = Some(receiver); - } - SwarmEvent::Behaviour(Event::RequestFinished { - result, .. - }) => { + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. 
}) => { result.unwrap(); - break; - } - _ => {} + break + }, + _ => {}, } } @@ -1046,21 +1062,27 @@ mod tests { .map(|_| { let (tx, mut rx) = mpsc::channel::(64); - pool.spawner().spawn_obj(async move { - while let Some(rq) = rx.next().await { - assert_eq!(rq.payload, b"this is a request"); - let _ = rq.pending_response.send(super::OutgoingResponse { - result: Ok(b"this response exceeds the limit".to_vec()), - reputation_changes: Vec::new(), - sent_feedback: None, - }); - } - }.boxed().into()).unwrap(); + pool.spawner() + .spawn_obj( + async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(super::OutgoingResponse { + result: Ok(b"this response exceeds the limit".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }); + } + } + .boxed() + .into(), + ) + .unwrap(); let protocol_config = ProtocolConfig { name: From::from(protocol_name), max_request_size: 1024, - max_response_size: 8, // <-- important for the test + max_response_size: 8, // <-- important for the test request_timeout: Duration::from_secs(30), inbound_queue: Some(tx), }; @@ -1078,20 +1100,24 @@ mod tests { // Running `swarm[0]` in the background until a `InboundRequest` event happens, // which is a hint about the test having ended. - pool.spawner().spawn_obj({ - let (mut swarm, _) = swarms.remove(0); - async move { - loop { - match swarm.next_event().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - assert!(result.is_ok()); - break - }, - _ => {} + pool.spawner() + .spawn_obj({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {}, + } } } - }.boxed().into() - }).unwrap(); + .boxed() + .into() + }) + .unwrap(); // Remove and run the remaining swarm. let (mut swarm, _) = swarms.remove(0); @@ -1111,20 +1137,18 @@ mod tests { ); assert!(response_receiver.is_none()); response_receiver = Some(receiver); - } - SwarmEvent::Behaviour(Event::RequestFinished { - result, .. - }) => { + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. }) => { assert!(result.is_err()); - break; - } - _ => {} + break + }, + _ => {}, } } match response_receiver.unwrap().await.unwrap().unwrap_err() { RequestFailure::Network(OutboundFailure::ConnectionClosed) => {}, - _ => panic!() + _ => panic!(), } }); } @@ -1197,89 +1221,97 @@ mod tests { swarm_1.dial_addr(listen_add_2).unwrap(); // Run swarm 2 in the background, receiving two requests. - pool.spawner().spawn_obj( - async move { - loop { - match swarm_2.next_event().await { - SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { - result.unwrap(); - }, - _ => {} + pool.spawner() + .spawn_obj( + async move { + loop { + match swarm_2.next_event().await { + SwarmEvent::Behaviour(Event::InboundRequest { result, .. }) => { + result.unwrap(); + }, + _ => {}, + } } } - }.boxed().into() - ).unwrap(); + .boxed() + .into(), + ) + .unwrap(); // Handle both requests sent by swarm 1 to swarm 2 in the background. // // Make sure both requests overlap, by answering the first only after receiving the // second. 
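The test hunks above and below show how the formatter lays out a boxed task once `spawn_obj(...)` no longer fits on one line: the async block is indented as the argument and `.boxed().into()` trails on its own lines. Reduced to a standalone toy using the same `futures` APIs as the tests:

```rust
use futures::{executor::LocalPool, task::Spawn, FutureExt};

fn main() {
	let mut pool = LocalPool::new();
	pool.spawner()
		.spawn_obj(
			async move {
				println!("laid out like the swarm tasks in the hunks above");
			}
			.boxed()
			.into(),
		)
		.unwrap();
	pool.run_until_stalled();
}
```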
- pool.spawner().spawn_obj(async move { - let protocol_1_request = swarm_2_handler_1.next().await; - let protocol_2_request = swarm_2_handler_2.next().await; - - protocol_1_request.unwrap() - .pending_response - .send(OutgoingResponse { - result: Ok(b"this is a response".to_vec()), - reputation_changes: Vec::new(), - sent_feedback: None, - }) - .unwrap(); - protocol_2_request.unwrap() - .pending_response - .send(OutgoingResponse { - result: Ok(b"this is a response".to_vec()), - reputation_changes: Vec::new(), - sent_feedback: None, - }) - .unwrap(); - }.boxed().into()).unwrap(); + pool.spawner() + .spawn_obj( + async move { + let protocol_1_request = swarm_2_handler_1.next().await; + let protocol_2_request = swarm_2_handler_2.next().await; + + protocol_1_request + .unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .unwrap(); + protocol_2_request + .unwrap() + .pending_response + .send(OutgoingResponse { + result: Ok(b"this is a response".to_vec()), + reputation_changes: Vec::new(), + sent_feedback: None, + }) + .unwrap(); + } + .boxed() + .into(), + ) + .unwrap(); // Have swarm 1 send two requests to swarm 2 and await responses. - pool.run_until( - async move { - let mut response_receivers = None; - let mut num_responses = 0; - - loop { - match swarm_1.next_event().await { - SwarmEvent::ConnectionEstablished { peer_id, .. } => { - let (sender_1, receiver_1) = oneshot::channel(); - let (sender_2, receiver_2) = oneshot::channel(); - swarm_1.behaviour_mut().send_request( - &peer_id, - protocol_name_1, - b"this is a request".to_vec(), - sender_1, - IfDisconnected::ImmediateError, - ); - swarm_1.behaviour_mut().send_request( - &peer_id, - protocol_name_2, - b"this is a request".to_vec(), - sender_2, - IfDisconnected::ImmediateError, - ); - assert!(response_receivers.is_none()); - response_receivers = Some((receiver_1, receiver_2)); - } - SwarmEvent::Behaviour(Event::RequestFinished { - result, .. - }) => { - num_responses += 1; - result.unwrap(); - if num_responses == 2 { - break; - } + pool.run_until(async move { + let mut response_receivers = None; + let mut num_responses = 0; + + loop { + match swarm_1.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let (sender_1, receiver_1) = oneshot::channel(); + let (sender_2, receiver_2) = oneshot::channel(); + swarm_1.behaviour_mut().send_request( + &peer_id, + protocol_name_1, + b"this is a request".to_vec(), + sender_1, + IfDisconnected::ImmediateError, + ); + swarm_1.behaviour_mut().send_request( + &peer_id, + protocol_name_2, + b"this is a request".to_vec(), + sender_2, + IfDisconnected::ImmediateError, + ); + assert!(response_receivers.is_none()); + response_receivers = Some((receiver_1, receiver_2)); + }, + SwarmEvent::Behaviour(Event::RequestFinished { result, .. 
}) => { + num_responses += 1; + result.unwrap(); + if num_responses == 2 { + break } - _ => {} - } + }, + _ => {}, } - let (response_receiver_1, response_receiver_2) = response_receivers.unwrap(); - assert_eq!(response_receiver_1.await.unwrap().unwrap(), b"this is a response"); - assert_eq!(response_receiver_2.await.unwrap().unwrap(), b"this is a response"); } - ); + let (response_receiver_1, response_receiver_2) = response_receivers.unwrap(); + assert_eq!(response_receiver_1.await.unwrap().unwrap(), b"this is a response"); + assert_eq!(response_receiver_2.await.unwrap().unwrap(), b"this is a response"); + }); } } diff --git a/substrate/client/network/src/service.rs b/substrate/client/network/src/service.rs index fb303312093cd28aced9f7c4bb19299cf8eba8b4..89685849f5bfd4c9feffc14708514393451817a8 100644 --- a/substrate/client/network/src/service.rs +++ b/substrate/client/network/src/service.rs @@ -28,59 +28,45 @@ //! which is then processed by [`NetworkWorker::poll`]. use crate::{ - ExHashT, NetworkStateInfo, NetworkStatus, behaviour::{self, Behaviour, BehaviourOut}, + bitswap::Bitswap, config::{parse_str_addr, Params, TransportConfig}, - DhtEvent, discovery::DiscoveryConfig, error::Error, + light_client_requests, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, on_demand_layer::AlwaysBadChecker, - light_client_requests, protocol::{ self, - message::generic::Roles, - NotifsHandlerError, - NotificationsSink, - PeerInfo, - Protocol, - Ready, event::Event, - sync::{SyncState, Status as SyncStatus}, + message::generic::Roles, + sync::{Status as SyncStatus, SyncState}, + NotificationsSink, NotifsHandlerError, PeerInfo, Protocol, Ready, }, - transactions, - transport, ReputationChange, - - bitswap::Bitswap, + transactions, transport, DhtEvent, ExHashT, NetworkStateInfo, NetworkStatus, ReputationChange, }; use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; -use libp2p::{PeerId, multiaddr, Multiaddr}; -use libp2p::core::{ - ConnectedPoint, - Executor, - connection::{ - ConnectionLimits, - ConnectionError, - PendingConnectionError +use libp2p::{ + core::{ + connection::{ConnectionError, ConnectionLimits, PendingConnectionError}, + either::EitherError, + upgrade, ConnectedPoint, Executor, }, - either::EitherError, - upgrade -}; -use libp2p::kad::record; -use libp2p::ping::handler::PingFailure; -use libp2p::swarm::{ - AddressScore, - NetworkBehaviour, - SwarmBuilder, - SwarmEvent, - protocols_handler::NodeHandlerWrapperError + kad::record, + multiaddr, + ping::handler::PingFailure, + swarm::{ + protocols_handler::NodeHandlerWrapperError, AddressScore, NetworkBehaviour, SwarmBuilder, + SwarmEvent, + }, + Multiaddr, PeerId, }; -use log::{error, info, trace, debug, warn}; -use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; +use log::{debug, error, info, trace, warn}; +use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; @@ -91,10 +77,9 @@ use std::{ cmp, collections::{HashMap, HashSet}, convert::TryFrom as _, - fs, - iter, + fs, iter, marker::PhantomData, - num:: NonZeroUsize, + num::NonZeroUsize, pin::Pin, str, sync::{ @@ -104,7 +89,9 @@ use std::{ task::Poll, }; -pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, IfDisconnected}; +pub use behaviour::{ + IfDisconnected, InboundFailure, OutboundFailure, RequestFailure, 
ResponseFailure, +}; mod metrics; mod out_events; @@ -156,7 +143,12 @@ impl NetworkWorker { ¶ms.network_config.transport, )?; ensure_addresses_consistent_with_transport( - params.network_config.default_peers_set.reserved_nodes.iter().map(|x| &x.multiaddr), + params + .network_config + .default_peers_set + .reserved_nodes + .iter() + .map(|x| &x.multiaddr), ¶ms.network_config.transport, )?; for extra_set in ¶ms.network_config.extra_sets { @@ -176,10 +168,12 @@ impl NetworkWorker { fs::create_dir_all(path)?; } - let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( - params.protocol_id.clone() - ); - params.network_config.extra_sets.insert(0, transactions_handler_proto.set_config()); + let transactions_handler_proto = + transactions::TransactionsHandlerPrototype::new(params.protocol_id.clone()); + params + .network_config + .extra_sets + .insert(0, transactions_handler_proto.set_config()); // Private and public keys configuration. let local_identity = params.network_config.node_key.clone().into_keypair()?; @@ -201,8 +195,12 @@ impl NetworkWorker { params.chain.clone(), params.protocol_id.clone(), ¶ms.network_config, - iter::once(Vec::new()).chain((0..params.network_config.extra_sets.len() - 1) - .map(|_| default_notif_handshake_message.clone())).collect(), + iter::once(Vec::new()) + .chain( + (0..params.network_config.extra_sets.len() - 1) + .map(|_| default_notif_handshake_message.clone()), + ) + .collect(), params.block_announce_validator, params.metrics_registry.as_ref(), )?; @@ -221,23 +219,21 @@ impl NetworkWorker { let boot_node_ids = Arc::new(boot_node_ids); // Check for duplicate bootnodes. - known_addresses.iter() - .try_for_each(|(peer_id, addr)| - if let Some(other) = known_addresses - .iter() - .find(|o| o.1 == *addr && o.0 != *peer_id) - { - Err(Error::DuplicateBootnode { - address: addr.clone(), - first_id: peer_id.clone(), - second_id: other.0.clone(), - }) - } else { - Ok(()) - } - )?; + known_addresses.iter().try_for_each(|(peer_id, addr)| { + if let Some(other) = known_addresses.iter().find(|o| o.1 == *addr && o.0 != *peer_id) { + Err(Error::DuplicateBootnode { + address: addr.clone(), + first_id: peer_id.clone(), + second_id: other.0.clone(), + }) + } else { + Ok(()) + } + })?; - let checker = params.on_demand.as_ref() + let checker = params + .on_demand + .as_ref() .map(|od| od.checker().clone()) .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); @@ -249,8 +245,7 @@ impl NetworkWorker { let (mut swarm, bandwidth): (Swarm, _) = { let user_agent = format!( "{} ({})", - params.network_config.client_version, - params.network_config.node_name + params.network_config.client_version, params.network_config.node_name ); let light_client_request_sender = { @@ -264,21 +259,25 @@ impl NetworkWorker { let discovery_config = { let mut config = DiscoveryConfig::new(local_public.clone()); config.with_user_defined(known_addresses); - config.discovery_limit(u64::from(params.network_config.default_peers_set.out_peers) + 15); + config.discovery_limit( + u64::from(params.network_config.default_peers_set.out_peers) + 15, + ); config.add_protocol(params.protocol_id.clone()); config.with_dht_random_walk(params.network_config.enable_dht_random_walk); config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); - config.use_kademlia_disjoint_query_paths(params.network_config.kademlia_disjoint_query_paths); + config.use_kademlia_disjoint_query_paths( + params.network_config.kademlia_disjoint_query_paths, + ); match params.network_config.transport { 
TransportConfig::MemoryOnly => { config.with_mdns(false); config.allow_private_ipv4(false); - } + }, TransportConfig::Normal { enable_mdns, allow_private_ipv4, .. } => { config.with_mdns(enable_mdns); config.allow_private_ipv4(allow_private_ipv4); - } + }, } config @@ -288,7 +287,7 @@ impl NetworkWorker { let (config_mem, config_wasm) = match params.network_config.transport { TransportConfig::MemoryOnly => (true, None), TransportConfig::Normal { wasm_external_transport, .. } => - (false, wasm_external_transport) + (false, wasm_external_transport), }; // The yamux buffer size limit is configured to be equal to the maximum frame size @@ -298,27 +297,33 @@ impl NetworkWorker { // a variable-length-encoding 64bits number. In other words, we make the // assumption that no notification larger than 2^64 will ever be sent. let yamux_maximum_buffer_size = { - let requests_max = params.network_config - .request_response_protocols.iter() + let requests_max = params + .network_config + .request_response_protocols + .iter() .map(|cfg| usize::try_from(cfg.max_request_size).unwrap_or(usize::MAX)); - let responses_max = params.network_config - .request_response_protocols.iter() - .map(|cfg| usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX)); - let notifs_max = params.network_config - .extra_sets.iter() - .map(|cfg| usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX)); + let responses_max = + params.network_config.request_response_protocols.iter().map(|cfg| { + usize::try_from(cfg.max_response_size).unwrap_or(usize::MAX) + }); + let notifs_max = params.network_config.extra_sets.iter().map(|cfg| { + usize::try_from(cfg.max_notification_size).unwrap_or(usize::MAX) + }); // A "default" max is added to cover all the other protocols: ping, identify, // kademlia, block announces, and transactions. 
let default_max = cmp::max( 1024 * 1024, usize::try_from(protocol::BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE) - .unwrap_or(usize::MAX) + .unwrap_or(usize::MAX), ); iter::once(default_max) - .chain(requests_max).chain(responses_max).chain(notifs_max) - .max().expect("iterator known to always yield at least one element; qed") + .chain(requests_max) + .chain(responses_max) + .chain(notifs_max) + .max() + .expect("iterator known to always yield at least one element; qed") .saturating_add(10) }; @@ -327,7 +332,7 @@ impl NetworkWorker { config_mem, config_wasm, params.network_config.yamux_window_size, - yamux_maximum_buffer_size + yamux_maximum_buffer_size, ) }; @@ -348,18 +353,18 @@ impl NetworkWorker { match result { Ok(b) => b, - Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { - return Err(Error::DuplicateRequestResponseProtocol { - protocol: proto, - }) - }, + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => + return Err(Error::DuplicateRequestResponseProtocol { protocol: proto }), } }; let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .connection_limits(ConnectionLimits::default() - .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) - .with_max_established_incoming(Some(crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING)) + .connection_limits( + ConnectionLimits::default() + .with_max_established_per_peer(Some(crate::MAX_CONNECTIONS_PER_PEER as u32)) + .with_max_established_incoming(Some( + crate::MAX_CONNECTIONS_ESTABLISHED_INCOMING, + )), ) .substream_upgrade_protocol_override(upgrade::Version::V1Lazy) .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) @@ -378,14 +383,15 @@ impl NetworkWorker { // Initialize the metrics. let metrics = match ¶ms.metrics_registry { - Some(registry) => { - Some(metrics::register(registry, MetricSources { + Some(registry) => Some(metrics::register( + registry, + MetricSources { bandwidth: bandwidth.clone(), major_syncing: is_major_syncing.clone(), connected_peers: num_connected.clone(), - })?) - } - None => None + }, + )?), + None => None, }; // Listen on multiaddresses. @@ -412,8 +418,9 @@ impl NetworkWorker { local_peer_id, to_worker, peers_notifications_sinks: peers_notifications_sinks.clone(), - notifications_sizes_metric: - metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), + notifications_sizes_metric: metrics + .as_ref() + .map(|metrics| metrics.notifications_sizes.clone()), _marker: PhantomData, }); @@ -421,7 +428,7 @@ impl NetworkWorker { service.clone(), params.role, params.transaction_pool, - params.metrics_registry.as_ref() + params.metrics_registry.as_ref(), )?; (params.transactions_handler_executor)(tx_handler.run().boxed()); @@ -520,12 +527,18 @@ impl NetworkWorker { /// You must call this when a new block is finalized by the client. pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { - self.network_service.behaviour_mut().user_protocol_mut().on_block_finalized(hash, &header); + self.network_service + .behaviour_mut() + .user_protocol_mut() + .on_block_finalized(hash, &header); } /// Inform the network service about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - self.network_service.behaviour_mut().user_protocol_mut().new_best_block_imported(hash, number); + self.network_service + .behaviour_mut() + .user_protocol_mut() + .new_best_block_imported(hash, number); } /// Returns the local `PeerId`. 
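An aside on the `yamux_maximum_buffer_size` fold reformatted a few hunks above: it reduces every configured request, response, and notification size limit to a single buffer bound, with a floor for the built-in protocols and a small pad for the variable-length length prefix. A minimal standalone sketch of the same fold (the helper name and example limits are made up, and the floor is simplified):

    // Hypothetical helper mirroring the computation above: the buffer must be
    // able to hold the largest frame any configured protocol may produce,
    // plus headroom for the length prefix.
    fn max_buffer_size(protocol_limits: impl IntoIterator<Item = usize>) -> usize {
        // Floor for ping, identify, kademlia, etc.; the real code also folds
        // in the block-announce/transactions substream size.
        std::iter::once(1024 * 1024)
            .chain(protocol_limits)
            .max()
            .expect("iterator always yields at least one element; qed")
            .saturating_add(10)
    }

    // e.g. max_buffer_size([16 * 1024, 8 * 1024 * 1024]) == 8 * 1024 * 1024 + 10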
@@ -550,50 +563,76 @@ impl NetworkWorker { let connected_peers = { let swarm = &mut *swarm; - open.iter().filter_map(move |peer_id| { - let known_addresses = NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), peer_id) - .into_iter().collect(); - - let endpoint = if let Some(e) = swarm.behaviour_mut().node(peer_id).map(|i| i.endpoint()).flatten() { - e.clone().into() - } else { - error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ + open.iter() + .filter_map(move |peer_id| { + let known_addresses = + NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), peer_id) + .into_iter() + .collect(); + + let endpoint = if let Some(e) = + swarm.behaviour_mut().node(peer_id).map(|i| i.endpoint()).flatten() + { + e.clone().into() + } else { + error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ and debug information about {:?}", peer_id); - return None - }; - - Some((peer_id.to_base58(), NetworkStatePeer { - endpoint, - version_string: swarm.behaviour_mut().node(peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.behaviour_mut().node(peer_id).and_then(|i| i.latest_ping()), - known_addresses, - })) - }).collect() + return None + }; + + Some(( + peer_id.to_base58(), + NetworkStatePeer { + endpoint, + version_string: swarm + .behaviour_mut() + .node(peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm + .behaviour_mut() + .node(peer_id) + .and_then(|i| i.latest_ping()), + known_addresses, + }, + )) + }) + .collect() }; let not_connected_peers = { let swarm = &mut *swarm; - swarm.behaviour_mut().known_peers().into_iter() + swarm + .behaviour_mut() + .known_peers() + .into_iter() .filter(|p| open.iter().all(|n| n != p)) .map(move |peer_id| { - (peer_id.to_base58(), NetworkStateNotConnectedPeer { - version_string: swarm.behaviour_mut().node(&peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.behaviour_mut().node(&peer_id).and_then(|i| i.latest_ping()), - known_addresses: NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), &peer_id) - .into_iter().collect(), - }) + ( + peer_id.to_base58(), + NetworkStateNotConnectedPeer { + version_string: swarm + .behaviour_mut() + .node(&peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm + .behaviour_mut() + .node(&peer_id) + .and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer( + swarm.behaviour_mut(), + &peer_id, + ) + .into_iter() + .collect(), + }, + ) }) .collect() }; let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); let listened_addresses = swarm.listeners().cloned().collect(); - let external_addresses = swarm.external_addresses() - .map(|r| &r.addr) - .cloned() - .collect(); + let external_addresses = swarm.external_addresses().map(|r| &r.addr).cloned().collect(); NetworkState { peer_id, @@ -607,7 +646,9 @@ impl NetworkWorker { /// Get currently connected peers. pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { - self.network_service.behaviour_mut().user_protocol_mut() + self.network_service + .behaviour_mut() + .user_protocol_mut() .peers_info() .map(|(id, info)| (id.clone(), info.clone())) .collect() @@ -641,9 +682,7 @@ impl NetworkService { /// Need a better solution to manage authorized peers, but now just use reserved peers for /// prototyping. 
pub fn set_authorized_peers(&self, peers: HashSet) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReserved(peers)); } /// Set authorized_only flag. @@ -682,8 +721,12 @@ impl NetworkService { /// /// The protocol must have been registered with /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). - /// - pub fn write_notification(&self, target: PeerId, protocol: Cow<'static, str>, message: Vec) { + pub fn write_notification( + &self, + target: PeerId, + protocol: Cow<'static, str>, + message: Vec, + ) { // We clone the `NotificationsSink` in order to be able to unlock the network-wide // `peers_notifications_sinks` mutex as soon as possible. let sink = { @@ -697,7 +740,7 @@ impl NetworkService { "Attempted to send notification on missing or closed substream: {}, {:?}", target, protocol, ); - return; + return } }; @@ -756,9 +799,9 @@ impl NetworkService { /// // Do NOT do this /// for peer in peers { /// if let Ok(n) = network.notification_sender(peer, ...) { - /// if let Ok(s) = n.ready().await { - /// let _ = s.send(...); - /// } + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } /// } /// } /// ``` @@ -785,7 +828,6 @@ impl NetworkService { /// /// See also the [`gossip`](crate::gossip) module for a higher-level way to send /// notifications. - /// pub fn notification_sender( &self, target: PeerId, @@ -798,19 +840,16 @@ impl NetworkService { if let Some(sink) = peers_notifications_sinks.get(&(target, protocol.clone())) { sink.clone() } else { - return Err(NotificationSenderError::Closed); + return Err(NotificationSenderError::Closed) } }; - let notification_size_metric = self.notifications_sizes_metric.as_ref().map(|histogram| { - histogram.with_label_values(&["out", &protocol]) - }); + let notification_size_metric = self + .notifications_sizes_metric + .as_ref() + .map(|histogram| histogram.with_label_values(&["out", &protocol])); - Ok(NotificationSender { - sink, - protocol_name: protocol, - notification_size_metric, - }) + Ok(NotificationSender { sink, protocol_name: protocol, notification_size_metric }) } /// Returns a stream containing the events that happen on the network. @@ -898,9 +937,9 @@ impl NetworkService { pub async fn status(&self) -> Result, ()> { let (tx, rx) = oneshot::channel(); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::NetworkStatus { - pending_response: tx, - }); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkStatus { pending_response: tx }); match rx.await { Ok(v) => v.map_err(|_| ()), @@ -918,9 +957,9 @@ impl NetworkService { pub async fn network_state(&self) -> Result { let (tx, rx) = oneshot::channel(); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::NetworkState { - pending_response: tx, - }); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::NetworkState { pending_response: tx }); match rx.await { Ok(v) => v.map_err(|_| ()), @@ -967,7 +1006,9 @@ impl NetworkService { /// prevents the local node from re-establishing an outgoing substream to this peer until it /// is added again. 
pub fn disconnect_peer(&self, who: PeerId, protocol: impl Into>) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who, protocol.into())); } /// Request a justification for the given block from the network. @@ -982,9 +1023,7 @@ impl NetworkService { /// Clear all pending justification requests. pub fn clear_justification_requests(&self) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::ClearJustificationRequests); } /// Are we in the process of downloading the chain? @@ -997,9 +1036,7 @@ impl NetworkService { /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an /// item on the [`NetworkWorker`] stream. pub fn get_value(&self, key: &record::Key) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); } /// Start putting a value in the DHT. @@ -1007,24 +1044,18 @@ impl NetworkService { /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an /// item on the [`NetworkWorker`] stream. pub fn put_value(&self, key: record::Key, value: Vec) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); } /// Connect to unreserved peers and allow unreserved peers to connect for syncing purposes. pub fn accept_unreserved_peers(&self) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(false)); } /// Disconnect from unreserved peers and deny new unreserved peers to connect for syncing /// purposes. pub fn deny_unreserved_peers(&self) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SetReservedOnly(true)); } /// Adds a `PeerId` and its address as reserved. The string should encode the address @@ -1042,17 +1073,13 @@ impl NetworkService { let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AddReserved(peer_id)); Ok(()) } /// Removes a `PeerId` from the list of reserved peers. pub fn remove_reserved_peer(&self, peer_id: PeerId) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } /// Add peers to a peer set. @@ -1062,7 +1089,11 @@ impl NetworkService { /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). 
- pub fn add_peers_to_reserved_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { + pub fn add_peers_to_reserved_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { @@ -1090,12 +1121,11 @@ impl NetworkService { /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for convenience. pub fn remove_peers_from_reserved_set( &self, protocol: Cow<'static, str>, - peers: HashSet + peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, _) in peers.into_iter() { @@ -1113,9 +1143,7 @@ impl NetworkService { /// a stale fork missing. /// Passing empty `peers` set effectively removes the sync request. pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); } /// Add a peer to a set of peers. @@ -1127,7 +1155,11 @@ impl NetworkService { /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - pub fn add_to_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { + pub fn add_to_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, addr) in peers.into_iter() { @@ -1157,9 +1189,12 @@ impl NetworkService { /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for convenience. - pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: HashSet) -> Result<(), String> { + pub fn remove_from_peers_set( + &self, + protocol: Cow<'static, str>, + peers: HashSet, + ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; for (peer_id, _) in peers.into_iter() { let _ = self @@ -1185,8 +1220,12 @@ impl NetworkService { /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). 
- fn split_multiaddr_and_peer_id(&self, peers: HashSet) -> Result, String> { - peers.into_iter() + fn split_multiaddr_and_peer_id( + &self, + peers: HashSet, + ) -> Result, String> { + peers + .into_iter() .map(|mut addr| { let peer = match addr.pop() { Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) @@ -1206,9 +1245,7 @@ impl NetworkService { } } -impl sp_consensus::SyncOracle - for NetworkService -{ +impl sp_consensus::SyncOracle for NetworkService { fn is_major_syncing(&mut self) -> bool { NetworkService::is_major_syncing(self) } @@ -1218,9 +1255,7 @@ impl sp_consensus::SyncOracle } } -impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle - for &'a NetworkService -{ +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { fn is_major_syncing(&mut self) -> bool { NetworkService::is_major_syncing(self) } @@ -1241,9 +1276,9 @@ impl sp_consensus::JustificationSyncLink for NetworkSe } impl NetworkStateInfo for NetworkService - where - B: sp_runtime::traits::Block, - H: ExHashT, +where + B: sp_runtime::traits::Block, + H: ExHashT, { /// Returns the local external addresses. fn external_addresses(&self) -> Vec { @@ -1271,7 +1306,9 @@ pub struct NotificationSender { impl NotificationSender { /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. - pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { + pub async fn ready<'a>( + &'a self, + ) -> Result, NotificationSenderError> { Ok(NotificationSenderReady { ready: match self.sink.reserve_notification().await { Ok(r) => r, @@ -1318,9 +1355,7 @@ impl<'a> NotificationSenderReady<'a> { ); trace!(target: "sub-libp2p", "Handler({:?}) <= Async notification", self.peer_id); - self.ready - .send(notification) - .map_err(|()| NotificationSenderError::Closed) + self.ready.send(notification).map_err(|()| NotificationSenderError::Closed) } } @@ -1417,9 +1452,8 @@ impl Future for NetworkWorker { let this = &mut *self; // Poll the import queue for actions to perform. - this.import_queue.poll_actions(cx, &mut NetworkLink { - protocol: &mut this.network_service, - }); + this.import_queue + .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); // Check for new incoming light client requests. if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { @@ -1428,8 +1462,10 @@ impl Future for NetworkWorker { match result { Ok(()) => {}, Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { - log::warn!("Couldn't start light client request: too many pending requests"); - } + log::warn!( + "Couldn't start light client request: too many pending requests" + ); + }, } if let Some(metrics) = this.metrics.as_ref() { @@ -1451,7 +1487,7 @@ impl Future for NetworkWorker { num_iterations += 1; if num_iterations >= 100 { cx.waker().wake_by_ref(); - break; + break } // Process the next message coming from the `NetworkService`. 
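The `num_iterations` guard reformatted just above is a cooperative-yield budget: one `poll` handles at most 100 service messages (a later hunk applies the same guard, with a budget of 1000, to network events), and when the budget runs out the worker calls `cx.waker().wake_by_ref()` before breaking, so the executor reschedules it promptly instead of letting this future monopolize the thread. A minimal sketch of the pattern on an illustrative future (not this crate's types):

    use std::{
        future::Future,
        pin::Pin,
        task::{Context, Poll},
    };

    /// Illustrative only: drain a queue, but process at most `BUDGET` items
    /// per poll, then self-wake and yield so other tasks can run.
    struct BudgetedDrain {
        queue: Vec<u64>,
    }

    impl Future for BudgetedDrain {
        type Output = ();

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            const BUDGET: usize = 100;
            let mut num_iterations = 0;
            while let Some(_msg) = self.queue.pop() {
                // ... handle one message here ...
                num_iterations += 1;
                if num_iterations >= BUDGET {
                    // Budget exhausted: request an immediate re-poll, then
                    // yield the thread back to the executor.
                    cx.waker().wake_by_ref();
                    return Poll::Pending;
                }
            }
            Poll::Ready(())
        }
    }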
@@ -1462,12 +1498,21 @@ impl Future for NetworkWorker { }; match msg { - ServiceToWorkerMsg::AnnounceBlock(hash, data) => - this.network_service.behaviour_mut().user_protocol_mut().announce_block(hash, data), - ServiceToWorkerMsg::RequestJustification(hash, number) => - this.network_service.behaviour_mut().user_protocol_mut().request_justification(&hash, number), - ServiceToWorkerMsg::ClearJustificationRequests => - this.network_service.behaviour_mut().user_protocol_mut().clear_justification_requests(), + ServiceToWorkerMsg::AnnounceBlock(hash, data) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .announce_block(hash, data), + ServiceToWorkerMsg::RequestJustification(hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .request_justification(&hash, number), + ServiceToWorkerMsg::ClearJustificationRequests => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .clear_justification_requests(), ServiceToWorkerMsg::PropagateTransaction(hash) => this.tx_handler_controller.propagate_transaction(hash), ServiceToWorkerMsg::PropagateTransactions => @@ -1476,30 +1521,68 @@ impl Future for NetworkWorker { this.network_service.behaviour_mut().get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => this.network_service.behaviour_mut().put_value(key, value), - ServiceToWorkerMsg::SetReservedOnly(reserved_only) => - this.network_service.behaviour_mut().user_protocol_mut().set_reserved_only(reserved_only), - ServiceToWorkerMsg::SetReserved(peers) => - this.network_service.behaviour_mut().user_protocol_mut().set_reserved_peers(peers), - ServiceToWorkerMsg::AddReserved(peer_id) => - this.network_service.behaviour_mut().user_protocol_mut().add_reserved_peer(peer_id), - ServiceToWorkerMsg::RemoveReserved(peer_id) => - this.network_service.behaviour_mut().user_protocol_mut().remove_reserved_peer(peer_id), - ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => - this.network_service.behaviour_mut().user_protocol_mut().add_set_reserved_peer(protocol, peer_id), - ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => - this.network_service.behaviour_mut().user_protocol_mut().remove_set_reserved_peer(protocol, peer_id), + ServiceToWorkerMsg::SetReservedOnly(reserved_only) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_reserved_only(reserved_only), + ServiceToWorkerMsg::SetReserved(peers) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_reserved_peers(peers), + ServiceToWorkerMsg::AddReserved(peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_reserved_peer(peer_id), + ServiceToWorkerMsg::RemoveReserved(peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_reserved_peer(peer_id), + ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_set_reserved_peer(protocol, peer_id), + ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_set_reserved_peer(protocol, peer_id), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => this.network_service.behaviour_mut().add_known_address(peer_id, addr), - ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => - this.network_service.behaviour_mut().user_protocol_mut().add_to_peers_set(protocol, peer_id), - ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => - 
this.network_service.behaviour_mut().user_protocol_mut().remove_from_peers_set(protocol, peer_id), - ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => - this.network_service.behaviour_mut().user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), - ServiceToWorkerMsg::EventStream(sender) => - this.event_streams.push(sender), - ServiceToWorkerMsg::Request { target, protocol, request, pending_response, connect } => { - this.network_service.behaviour_mut().send_request(&target, &protocol, request, pending_response, connect); + ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .add_to_peers_set(protocol, peer_id), + ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .remove_from_peers_set(protocol, peer_id), + ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_sync_fork_request(peer_ids, &hash, number), + ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), + ServiceToWorkerMsg::Request { + target, + protocol, + request, + pending_response, + connect, + } => { + this.network_service.behaviour_mut().send_request( + &target, + &protocol, + request, + pending_response, + connect, + ); }, ServiceToWorkerMsg::NetworkStatus { pending_response } => { let _ = pending_response.send(Ok(this.status())); @@ -1507,10 +1590,16 @@ impl Future for NetworkWorker { ServiceToWorkerMsg::NetworkState { pending_response } => { let _ = pending_response.send(Ok(this.network_state())); }, - ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) => - this.network_service.behaviour_mut().user_protocol_mut().disconnect_peer(&who, &protocol_name), - ServiceToWorkerMsg::NewBestBlockImported(hash, number) => - this.network_service.behaviour_mut().user_protocol_mut().new_best_block_imported(hash, number), + ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .disconnect_peer(&who, &protocol_name), + ServiceToWorkerMsg::NewBestBlockImported(hash, number) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .new_best_block_imported(hash, number), } } @@ -1521,7 +1610,7 @@ impl Future for NetworkWorker { num_iterations += 1; if num_iterations >= 1000 { cx.waker().wake_by_ref(); - break; + break } // Process the next action coming from the network. @@ -1537,28 +1626,40 @@ impl Future for NetworkWorker { } this.import_queue.import_blocks(origin, blocks); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justifications))) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport( + origin, + hash, + nb, + justifications, + ))) => { if let Some(metrics) = this.metrics.as_ref() { metrics.import_queue_justifications_submitted.inc(); } this.import_queue.import_justifications(origin, hash, nb, justifications); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { + protocol, + result, + .. 
+ })) => { if let Some(metrics) = this.metrics.as_ref() { match result { Ok(serve_time) => { - metrics.requests_in_success_total + metrics + .requests_in_success_total .with_label_values(&[&protocol]) .observe(serve_time.as_secs_f64()); - } + }, Err(err) => { let reason = match err { ResponseFailure::Network(InboundFailure::Timeout) => "timeout", - ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => - // `UnsupportedProtocols` is reported for every single - // inbound request whenever a request with an unsupported - // protocol is received. This is not reported in order to - // avoid confusions. + ResponseFailure::Network( + InboundFailure::UnsupportedProtocols, + ) => + // `UnsupportedProtocols` is reported for every single + // inbound request whenever a request with an unsupported + // protocol is received. This is not reported in order to + // avoid confusions. continue, ResponseFailure::Network(InboundFailure::ResponseOmission) => "busy-omitted", @@ -1566,23 +1667,28 @@ impl Future for NetworkWorker { "connection-closed", }; - metrics.requests_in_failure_total + metrics + .requests_in_failure_total .with_label_values(&[&protocol, reason]) .inc(); - } + }, } } }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { - protocol, duration, result, .. - })) => { + protocol, + duration, + result, + .. + })) => if let Some(metrics) = this.metrics.as_ref() { match result { Ok(_) => { - metrics.requests_out_success_total + metrics + .requests_out_success_total .with_label_values(&[&protocol]) .observe(duration.as_secs_f64()); - } + }, Err(err) => { let reason = match err { RequestFailure::NotConnected => "not-connected", @@ -1591,34 +1697,42 @@ impl Future for NetworkWorker { RequestFailure::Obsolete => "obsolete", RequestFailure::Network(OutboundFailure::DialFailure) => "dial-failure", - RequestFailure::Network(OutboundFailure::Timeout) => - "timeout", + RequestFailure::Network(OutboundFailure::Timeout) => "timeout", RequestFailure::Network(OutboundFailure::ConnectionClosed) => "connection-closed", - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => - "unsupported", + RequestFailure::Network( + OutboundFailure::UnsupportedProtocols, + ) => "unsupported", }; - metrics.requests_out_failure_total + metrics + .requests_out_failure_total .with_label_values(&[&protocol, reason]) .inc(); - } + }, } - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( + protocol, + ))) => if let Some(metrics) = this.metrics.as_ref() { - metrics.kademlia_random_queries_total + metrics + .kademlia_random_queries_total .with_label_values(&[&protocol.as_ref()]) .inc(); - } - }, + }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { - remote, protocol, negotiated_fallback, notifications_sink, role + remote, + protocol, + negotiated_fallback, + notifications_sink, + role, })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_streams_opened_total - .with_label_values(&[&protocol]).inc(); + metrics + .notifications_streams_opened_total + .with_label_values(&[&protocol]) + .inc(); } { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); @@ -1634,7 +1748,9 @@ impl Future for NetworkWorker { }); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { - remote, protocol, notifications_sink + remote, + protocol, + notifications_sink, })) => { let mut 
peers_notifications_sinks = this.peers_notifications_sinks.lock(); if let Some(s) = peers_notifications_sinks.get_mut(&(remote, protocol)) { @@ -1658,20 +1774,25 @@ impl Future for NetworkWorker { // acceptable, this bug is at the moment intentionally left there and is // intended to be fixed at the same time as // https://github.com/paritytech/substrate/issues/6403. - /*this.event_streams.send(Event::NotificationStreamClosed { - remote, - protocol, - }); - this.event_streams.send(Event::NotificationStreamOpened { - remote, - protocol, - role, - });*/ + // this.event_streams.send(Event::NotificationStreamClosed { + // remote, + // protocol, + // }); + // this.event_streams.send(Event::NotificationStreamOpened { + // remote, + // protocol, + // role, + // }); }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, protocol })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { + remote, + protocol, + })) => { if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_streams_closed_total - .with_label_values(&[&protocol[..]]).inc(); + metrics + .notifications_streams_closed_total + .with_label_values(&[&protocol[..]]) + .inc(); } this.event_streams.send(Event::NotificationStreamClosed { remote: remote.clone(), @@ -1679,23 +1800,24 @@ impl Future for NetworkWorker { }); { let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); - let _previous_value = peers_notifications_sinks - .remove(&(remote.clone(), protocol)); + let _previous_value = + peers_notifications_sinks.remove(&(remote.clone(), protocol)); debug_assert!(_previous_value.is_some()); } }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { + remote, + messages, + })) => { if let Some(metrics) = this.metrics.as_ref() { for (protocol, message) in &messages { - metrics.notifications_sizes + metrics + .notifications_sizes .with_label_values(&["in", protocol]) .observe(message.len() as f64); } } - this.event_streams.send(Event::NotificationsReceived { - remote, - messages, - }); + this.event_streams.send(Event::NotificationsReceived { remote, messages }); }, Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::SyncConnected(remote))) => { this.event_streams.send(Event::SyncConnected { remote }); @@ -1711,13 +1833,19 @@ impl Future for NetworkWorker { DhtEvent::ValuePut(_) => "value-put", DhtEvent::ValuePutFailed(_) => "value-put-failed", }; - metrics.kademlia_query_duration.with_label_values(&[query_type]) + metrics + .kademlia_query_duration + .with_label_values(&[query_type]) .observe(duration.as_secs_f64()); } this.event_streams.send(Event::Dht(event)); }, - Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { + Poll::Ready(SwarmEvent::ConnectionEstablished { + peer_id, + endpoint, + num_established, + }) => { debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); if let Some(metrics) = this.metrics.as_ref() { @@ -1732,7 +1860,12 @@ impl Future for NetworkWorker { } } }, - Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established }) => { + Poll::Ready(SwarmEvent::ConnectionClosed { + peer_id, + cause, + endpoint, + num_established, + }) => { debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { @@ -1741,17 +1874,27 @@ impl Future for 
NetworkWorker { }; let reason = match cause { Some(ConnectionError::IO(_)) => "transport-error", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::B(EitherError::A( - PingFailure::Timeout)))))))) => "ping-timeout", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A( - NotifsHandlerError::SyncNotificationsClogged))))))) => "sync-notifications-clogged", - Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", - Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler( + EitherError::A(EitherError::A(EitherError::A(EitherError::B( + EitherError::A(PingFailure::Timeout), + )))), + ))) => "ping-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged, + )))), + ))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => + "protocol-error", + Some(ConnectionError::Handler( + NodeHandlerWrapperError::KeepAliveTimeout, + )) => "keep-alive-timeout", None => "actively-closed", }; - metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); + metrics + .connections_closed_total + .with_label_values(&[direction, reason]) + .inc(); // `num_established` represents the number of *remaining* connections. if num_established == 0 { @@ -1791,15 +1934,22 @@ impl Future for NetworkWorker { if let Some(metrics) = this.metrics.as_ref() { match error { - PendingConnectionError::ConnectionLimit(_) => - metrics.pending_connections_errors_total.with_label_values(&["limit-reached"]).inc(), - PendingConnectionError::InvalidPeerId => - metrics.pending_connections_errors_total.with_label_values(&["invalid-peer-id"]).inc(), - PendingConnectionError::Transport(_) | PendingConnectionError::IO(_) => - metrics.pending_connections_errors_total.with_label_values(&["transport-error"]).inc(), + PendingConnectionError::ConnectionLimit(_) => metrics + .pending_connections_errors_total + .with_label_values(&["limit-reached"]) + .inc(), + PendingConnectionError::InvalidPeerId => metrics + .pending_connections_errors_total + .with_label_values(&["invalid-peer-id"]) + .inc(), + PendingConnectionError::Transport(_) | + PendingConnectionError::IO(_) => metrics + .pending_connections_errors_total + .with_label_values(&["transport-error"]) + .inc(), } } - } + }, Poll::Ready(SwarmEvent::Dialing(peer_id)) => trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id), Poll::Ready(SwarmEvent::IncomingConnection { local_addr, send_back_addr }) => { @@ -1809,7 +1959,11 @@ impl Future for NetworkWorker { metrics.incoming_connections_total.inc(); } }, - Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { + Poll::Ready(SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + }) => { debug!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", local_addr, send_back_addr, error); if let Some(metrics) = this.metrics.as_ref() { @@ -1820,14 +1974,20 @@ impl Future for NetworkWorker { PendingConnectionError::IO(_) => "transport-error", }; - metrics.incoming_connections_errors_total.with_label_values(&[reason]).inc(); + metrics + .incoming_connections_errors_total + 
.with_label_values(&[reason]) + .inc(); } }, Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { debug!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.", peer_id, endpoint); if let Some(metrics) = this.metrics.as_ref() { - metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); + metrics + .incoming_connections_errors_total + .with_label_values(&["banned"]) + .inc(); } }, Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => @@ -1837,8 +1997,8 @@ impl Future for NetworkWorker { if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); } - let addrs = addresses.into_iter().map(|a| a.to_string()) - .collect::>().join(", "); + let addrs = + addresses.into_iter().map(|a| a.to_string()).collect::>().join(", "); match reason { Ok(()) => error!( target: "sub-libp2p", @@ -1861,7 +2021,8 @@ impl Future for NetworkWorker { }; } - let num_connected_peers = this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); + let num_connected_peers = + this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); // Update the variables shared with the `NetworkService`. this.num_connected.store(num_connected_peers, Ordering::Relaxed); @@ -1873,10 +2034,11 @@ impl Future for NetworkWorker { *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state { - SyncState::Idle => false, - SyncState::Downloading => true, - }; + let is_major_syncing = + match this.network_service.behaviour_mut().user_protocol_mut().sync_state().state { + SyncState::Idle => false, + SyncState::Downloading => true, + }; this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); @@ -1885,25 +2047,41 @@ impl Future for NetworkWorker { if let Some(metrics) = this.metrics.as_ref() { for (proto, buckets) in this.network_service.behaviour_mut().num_entries_per_kbucket() { for (lower_ilog2_bucket_bound, num_entries) in buckets { - metrics.kbuckets_num_nodes - .with_label_values(&[&proto.as_ref(), &lower_ilog2_bucket_bound.to_string()]) + metrics + .kbuckets_num_nodes + .with_label_values(&[ + &proto.as_ref(), + &lower_ilog2_bucket_bound.to_string(), + ]) .set(num_entries as u64); } } - for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records() { - metrics.kademlia_records_count.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); + for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records() + { + metrics + .kademlia_records_count + .with_label_values(&[&proto.as_ref()]) + .set(num_entries as u64); } - for (proto, num_entries) in this.network_service.behaviour_mut().kademlia_records_total_size() { - metrics.kademlia_records_sizes_total.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); + for (proto, num_entries) in + this.network_service.behaviour_mut().kademlia_records_total_size() + { + metrics + .kademlia_records_sizes_total + .with_label_values(&[&proto.as_ref()]) + .set(num_entries as u64); } - metrics.peerset_num_discovered.set( - this.network_service.behaviour_mut().user_protocol().num_discovered_peers() as u64 - ); + metrics + .peerset_num_discovered + .set(this.network_service.behaviour_mut().user_protocol().num_discovered_peers() + as u64); metrics.peerset_num_requested.set( - this.network_service.behaviour_mut().user_protocol().requested_peers().count() as u64 + 
this.network_service.behaviour_mut().user_protocol().requested_peers().count() + as u64, ); metrics.pending_connections.set( - Swarm::network_info(&this.network_service).connection_counters().num_pending() as u64 + Swarm::network_info(&this.network_service).connection_counters().num_pending() + as u64, ); } @@ -1911,8 +2089,7 @@ impl Future for NetworkWorker { } } -impl Unpin for NetworkWorker { -} +impl Unpin for NetworkWorker {} /// The libp2p swarm, customized for our needs. type Swarm = libp2p::swarm::Swarm>; @@ -1927,15 +2104,32 @@ impl<'a, B: BlockT> Link for NetworkLink<'a, B> { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { - self.protocol.behaviour_mut().user_protocol_mut().on_blocks_processed(imported, count, results) + self.protocol + .behaviour_mut() + .user_protocol_mut() + .on_blocks_processed(imported, count, results) } - fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { - self.protocol.behaviour_mut().user_protocol_mut().justification_import_result(who, hash.clone(), number, success); + fn justification_imported( + &mut self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + self.protocol.behaviour_mut().user_protocol_mut().justification_import_result( + who, + hash.clone(), + number, + success, + ); } fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.behaviour_mut().user_protocol_mut().request_justification(hash, number) + self.protocol + .behaviour_mut() + .user_protocol_mut() + .request_justification(hash, number) } } @@ -1945,9 +2139,9 @@ fn ensure_addresses_consistent_with_transport<'a>( ) -> Result<(), Error> { if matches!(transport, TransportConfig::MemoryOnly) { let addresses: Vec<_> = addresses - .filter(|x| x.iter() - .any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - ) + .filter(|x| { + x.iter().any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + }) .cloned() .collect(); @@ -1955,13 +2149,11 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } else { let addresses: Vec<_> = addresses - .filter(|x| x.iter() - .any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) - ) + .filter(|x| x.iter().any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_)))) .cloned() .collect(); @@ -1969,7 +2161,7 @@ fn ensure_addresses_consistent_with_transport<'a>( return Err(Error::AddressesForAnotherTransport { transport: transport.clone(), addresses, - }); + }) } } diff --git a/substrate/client/network/src/service/metrics.rs b/substrate/client/network/src/service/metrics.rs index 40d65ea45f11128ec3d6155932996b24b5cc08e3..e33cd4b194d69298c8b55b06353b9eb0ed93859f 100644 --- a/substrate/client/network/src/service/metrics.rs +++ b/substrate/client/network/src/service/metrics.rs @@ -18,10 +18,8 @@ use crate::transport::BandwidthSinks; use prometheus_endpoint::{ - self as prometheus, - Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, - PrometheusError, Registry, U64, Opts, - SourcedCounter, SourcedGauge, MetricSource, + self as prometheus, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, MetricSource, Opts, + PrometheusError, Registry, SourcedCounter, SourcedGauge, U64, }; use std::{ str, @@ -267,13 +265,14 @@ impl BandwidthCounters { /// Registers the `BandwidthCounters` metric whose values are 
/// obtained from the given sinks. fn register(registry: &Registry, sinks: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedCounter::new( - &Opts::new( - "sub_libp2p_network_bytes_total", - "Total bandwidth usage" - ).variable_label("direction"), - BandwidthCounters(sinks), - )?, registry)?; + prometheus::register( + SourcedCounter::new( + &Opts::new("sub_libp2p_network_bytes_total", "Total bandwidth usage") + .variable_label("direction"), + BandwidthCounters(sinks), + )?, + registry, + )?; Ok(()) } @@ -296,13 +295,16 @@ impl MajorSyncingGauge { /// Registers the `MajorSyncGauge` metric whose value is /// obtained from the given `AtomicBool`. fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedGauge::new( - &Opts::new( - "sub_libp2p_is_major_syncing", - "Whether the node is performing a major sync or not.", - ), - MajorSyncingGauge(value), - )?, registry)?; + prometheus::register( + SourcedGauge::new( + &Opts::new( + "sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + ), + MajorSyncingGauge(value), + )?, + registry, + )?; Ok(()) } @@ -324,13 +326,13 @@ impl NumConnectedGauge { /// Registers the `MajorSyncingGauge` metric whose value is /// obtained from the given `AtomicUsize`. fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { - prometheus::register(SourcedGauge::new( - &Opts::new( - "sub_libp2p_peers_count", - "Number of connected peers", - ), - NumConnectedGauge(value), - )?, registry)?; + prometheus::register( + SourcedGauge::new( + &Opts::new("sub_libp2p_peers_count", "Number of connected peers"), + NumConnectedGauge(value), + )?, + registry, + )?; Ok(()) } diff --git a/substrate/client/network/src/service/out_events.rs b/substrate/client/network/src/service/out_events.rs index 7ec6c608a8fcf0883e6ae66c302a4f50b9130256..fad61491fb2254598f6c525263376db42a70dae2 100644 --- a/substrate/client/network/src/service/out_events.rs +++ b/substrate/client/network/src/service/out_events.rs @@ -30,17 +30,18 @@ //! [`OutChannels::push`] to put the sender within a [`OutChannels`]. //! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the //! collection. -//! use crate::Event; -use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; +use futures::{channel::mpsc, prelude::*, ready, stream::FusedStream}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use std::{ convert::TryFrom as _, - fmt, pin::Pin, sync::Arc, - task::{Context, Poll} + fmt, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; /// Creates a new channel that can be associated to a [`OutChannels`]. @@ -100,8 +101,10 @@ impl Stream for Receiver { let metrics = self.metrics.lock().clone(); match metrics.as_ref().map(|m| m.as_ref()) { Some(Some(metrics)) => metrics.event_out(&ev, self.name), - Some(None) => (), // no registry - None => log::warn!("Inconsistency in out_events: event happened before sender associated"), + Some(None) => (), // no registry + None => log::warn!( + "Inconsistency in out_events: event happened before sender associated" + ), } Poll::Ready(Some(ev)) } else { @@ -136,16 +139,10 @@ pub struct OutChannels { impl OutChannels { /// Creates a new empty collection of senders. pub fn new(registry: Option<&Registry>) -> Result { - let metrics = if let Some(registry) = registry { - Some(Metrics::register(registry)?) 
- } else { - None - }; + let metrics = + if let Some(registry) = registry { Some(Metrics::register(registry)?) } else { None }; - Ok(OutChannels { - event_streams: Vec::new(), - metrics: Arc::new(metrics), - }) + Ok(OutChannels { event_streams: Vec::new(), metrics: Arc::new(metrics) }) } /// Adds a new [`Sender`] to the collection. @@ -164,9 +161,8 @@ impl OutChannels { /// Sends an event. pub fn send(&mut self, event: Event) { - self.event_streams.retain(|sender| { - sender.inner.unbounded_send(event.clone()).is_ok() - }); + self.event_streams + .retain(|sender| sender.inner.unbounded_send(event.clone()).is_ok()); if let Some(metrics) = &*self.metrics { for ev in &self.event_streams { @@ -223,20 +219,18 @@ impl Metrics { fn event_in(&self, event: &Event, num: u64, name: &str) { match event { Event::Dht(_) => { - self.events_total - .with_label_values(&["dht", "sent", name]) - .inc_by(num); - } + self.events_total.with_label_values(&["dht", "sent", name]).inc_by(num); + }, Event::SyncConnected { .. } => { self.events_total .with_label_values(&["sync-connected", "sent", name]) .inc_by(num); - } + }, Event::SyncDisconnected { .. } => { self.events_total .with_label_values(&["sync-disconnected", "sent", name]) .inc_by(num); - } + }, Event::NotificationStreamOpened { protocol, .. } => { self.events_total .with_label_values(&[&format!("notif-open-{:?}", protocol), "sent", name]) @@ -247,36 +241,31 @@ impl Metrics { .with_label_values(&[&format!("notif-closed-{:?}", protocol), "sent", name]) .inc_by(num); }, - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. } => for (protocol, message) in messages { self.events_total .with_label_values(&[&format!("notif-{:?}", protocol), "sent", name]) .inc_by(num); - self.notifications_sizes - .with_label_values(&[protocol, "sent", name]) - .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX))); - } - }, + self.notifications_sizes.with_label_values(&[protocol, "sent", name]).inc_by( + num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::MAX)), + ); + }, } } fn event_out(&self, event: &Event, name: &str) { match event { Event::Dht(_) => { - self.events_total - .with_label_values(&["dht", "received", name]) - .inc(); - } + self.events_total.with_label_values(&["dht", "received", name]).inc(); + }, Event::SyncConnected { .. } => { - self.events_total - .with_label_values(&["sync-connected", "received", name]) - .inc(); - } + self.events_total.with_label_values(&["sync-connected", "received", name]).inc(); + }, Event::SyncDisconnected { .. } => { self.events_total .with_label_values(&["sync-disconnected", "received", name]) .inc(); - } + }, Event::NotificationStreamOpened { protocol, .. } => { self.events_total .with_label_values(&[&format!("notif-open-{:?}", protocol), "received", name]) @@ -287,7 +276,7 @@ impl Metrics { .with_label_values(&[&format!("notif-closed-{:?}", protocol), "received", name]) .inc(); }, - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. 
} => for (protocol, message) in messages { self.events_total .with_label_values(&[&format!("notif-{:?}", protocol), "received", name]) @@ -295,8 +284,7 @@ impl Metrics { self.notifications_sizes .with_label_values(&[&protocol, "received", name]) .inc_by(u64::try_from(message.len()).unwrap_or(u64::MAX)); - } - }, + }, } } } diff --git a/substrate/client/network/src/service/tests.rs b/substrate/client/network/src/service/tests.rs index 4a739e50628a5241353b1f6a9d3c94febd36838b..7acfeadcae13bc9ea4a04c4836d07fb49ee40de7 100644 --- a/substrate/client/network/src/service/tests.rs +++ b/substrate/client/network/src/service/tests.rs @@ -16,13 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::{config, Event, NetworkService, NetworkWorker}; -use crate::block_request_handler::BlockRequestHandler; -use crate::state_request_handler::StateRequestHandler; -use crate::light_client_requests::handler::LightClientRequestHandler; +use crate::{ + block_request_handler::BlockRequestHandler, config, + light_client_requests::handler::LightClientRequestHandler, + state_request_handler::StateRequestHandler, Event, NetworkService, NetworkWorker, +}; -use libp2p::PeerId; use futures::prelude::*; +use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{borrow::Cow, sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; @@ -37,14 +38,10 @@ type TestNetworkService = NetworkService< /// /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. -fn build_test_full_node(config: config::NetworkConfiguration) - -> (Arc, impl Stream) -{ - let client = Arc::new( - TestClientBuilder::with_default_backend() - .build_with_longest_chain() - .0, - ); +fn build_test_full_node( + config: config::NetworkConfiguration, +) -> (Arc, impl Stream) { + let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); #[derive(Clone)] struct PassThroughVerifier(bool); @@ -69,14 +66,13 @@ fn build_test_full_node(config: config::NetworkConfiguration) .log(|l| { l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) }) }) .map(|blob| { - vec![( - sp_blockchain::well_known_cache_keys::AUTHORITIES, - blob.to_vec(), - )] + vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] }); let mut import = sp_consensus::BlockImportParams::new(origin, header); @@ -99,30 +95,20 @@ fn build_test_full_node(config: config::NetworkConfiguration) let protocol_id = config::ProtocolId::from("/test-protocol-name"); let block_request_protocol_config = { - let (handler, protocol_config) = BlockRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); + let (handler, protocol_config) = BlockRequestHandler::new(&protocol_id, client.clone(), 50); async_std::task::spawn(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { - let (handler, protocol_config) = StateRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); + let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50); async_std::task::spawn(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = 
{ - let (handler, protocol_config) = LightClientRequestHandler::new( - &protocol_id, - client.clone(), - ); + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); async_std::task::spawn(handler.run().boxed()); protocol_config }; @@ -130,7 +116,9 @@ fn build_test_full_node(config: config::NetworkConfiguration) let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, - transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), network_config: config, chain: client.clone(), on_demand: None, @@ -162,43 +150,42 @@ const PROTOCOL_NAME: Cow<'static, str> = Cow::Borrowed("/foo"); /// Builds two nodes and their associated events stream. /// The nodes are connected together and have the `PROTOCOL_NAME` protocol registered. -fn build_nodes_one_proto() - -> (Arc, impl Stream, Arc, impl Stream) -{ +fn build_nodes_one_proto() -> ( + Arc, + impl Stream, + Arc, + impl Stream, +) { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: Default::default() - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], - .. Default::default() - } - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, + }], listen_addresses: vec![], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); (node1, events_stream1, node2, events_stream2) @@ -214,10 +201,18 @@ fn notifications_state_consistent() { // Write some initial notifications that shouldn't get through. 
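// (They are expected to be dropped: no notifications substream is open yet, and
// writing to a closed substream is silently ignored -- the very invariant this
// test exercises below.)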
for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); + node1.write_notification( + node2.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); + node2.write_notification( + node1.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } async_std::task::block_on(async move { @@ -234,16 +229,24 @@ fn notifications_state_consistent() { iterations += 1; if iterations >= 1_000 { assert!(something_happened); - break; + break } // Start by sending a notification from node1 to node2 and vice-versa. Part of the // test consists in ensuring that notifications get ignored if the stream isn't open. if rand::random::() % 5 >= 3 { - node1.write_notification(node2.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); + node1.write_notification( + node2.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } if rand::random::() % 5 >= 3 { - node2.write_notification(node1.local_peer_id().clone(), PROTOCOL_NAME, b"hello world".to_vec()); + node2.write_notification( + node1.local_peer_id().clone(), + PROTOCOL_NAME, + b"hello world".to_vec(), + ); } // Also randomly disconnect the two nodes from time to time. @@ -272,32 +275,40 @@ fn notifications_state_consistent() { }; match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, protocol, .. }) => { + future::Either::Left(Event::NotificationStreamOpened { + remote, protocol, .. + }) => { something_happened = true; assert!(!node1_to_node2_open); node1_to_node2_open = true; assert_eq!(remote, *node2.local_peer_id()); assert_eq!(protocol, PROTOCOL_NAME); - } - future::Either::Right(Event::NotificationStreamOpened { remote, protocol, .. }) => { + }, + future::Either::Right(Event::NotificationStreamOpened { + remote, protocol, .. + }) => { something_happened = true; assert!(!node2_to_node1_open); node2_to_node1_open = true; assert_eq!(remote, *node1.local_peer_id()); assert_eq!(protocol, PROTOCOL_NAME); - } - future::Either::Left(Event::NotificationStreamClosed { remote, protocol, .. }) => { + }, + future::Either::Left(Event::NotificationStreamClosed { + remote, protocol, .. + }) => { assert!(node1_to_node2_open); node1_to_node2_open = false; assert_eq!(remote, *node2.local_peer_id()); assert_eq!(protocol, PROTOCOL_NAME); - } - future::Either::Right(Event::NotificationStreamClosed { remote, protocol, .. }) => { + }, + future::Either::Right(Event::NotificationStreamClosed { + remote, protocol, .. + }) => { assert!(node2_to_node1_open); node2_to_node1_open = false; assert_eq!(remote, *node1.local_peer_id()); assert_eq!(protocol, PROTOCOL_NAME); - } + }, future::Either::Left(Event::NotificationsReceived { remote, .. }) => { assert!(node1_to_node2_open); assert_eq!(remote, *node2.local_peer_id()); @@ -305,10 +316,10 @@ fn notifications_state_consistent() { node1.write_notification( node2.local_peer_id().clone(), PROTOCOL_NAME, - b"hello world".to_vec() + b"hello world".to_vec(), ); } - } + }, future::Either::Right(Event::NotificationsReceived { remote, .. }) => { assert!(node2_to_node1_open); assert_eq!(remote, *node1.local_peer_id()); @@ -316,18 +327,18 @@ fn notifications_state_consistent() { node2.write_notification( node1.local_peer_id().clone(), PROTOCOL_NAME, - b"hello world".to_vec() + b"hello world".to_vec(), ); } - } + }, // Add new events here. 
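// (The arms below spell out every remaining `Event` variant rather than using a
// `_` catch-all, so adding a variant to `Event` makes this match fail to compile
// and forces the test to be revisited.)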
- future::Either::Left(Event::SyncConnected { .. }) => {} - future::Either::Right(Event::SyncConnected { .. }) => {} - future::Either::Left(Event::SyncDisconnected { .. }) => {} - future::Either::Right(Event::SyncDisconnected { .. }) => {} - future::Either::Left(Event::Dht(_)) => {} - future::Either::Right(Event::Dht(_)) => {} + future::Either::Left(Event::SyncConnected { .. }) => {}, + future::Either::Right(Event::SyncConnected { .. }) => {}, + future::Either::Left(Event::SyncDisconnected { .. }) => {}, + future::Either::Right(Event::SyncDisconnected { .. }) => {}, + future::Either::Left(Event::Dht(_)) => {}, + future::Either::Right(Event::Dht(_)) => {}, }; } }); @@ -339,19 +350,14 @@ fn lots_of_incoming_peers_works() { let (main_node, _) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: config::SetConfig { - in_peers: u32::MAX, - .. Default::default() - }, - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { in_peers: u32::MAX, ..Default::default() }, + }], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); let main_node_peer_id = main_node.local_peer_id().clone(); @@ -365,22 +371,20 @@ fn lots_of_incoming_peers_works() { let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![], - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr.clone(), - peer_id: main_node_peer_id.clone(), - }], - .. Default::default() - }, - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id.clone(), + }], + ..Default::default() + }, + }], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); background_tasks_to_wait.push(async_std::task::spawn(async move { @@ -416,9 +420,7 @@ fn lots_of_incoming_peers_works() { })); } - futures::executor::block_on(async move { - future::join_all(background_tasks_to_wait).await - }); + futures::executor::block_on(async move { future::join_all(background_tasks_to_wait).await }); } #[test] @@ -437,14 +439,13 @@ fn notifications_back_pressure() { while received_notifications < TOTAL_NOTIFS { match events_stream2.next().await.unwrap() { Event::NotificationStreamClosed { .. } => panic!(), - Event::NotificationsReceived { messages, .. } => { + Event::NotificationsReceived { messages, .. 
} => for message in messages { assert_eq!(message.0, PROTOCOL_NAME); assert_eq!(message.1, format!("hello #{}", received_notifications)); received_notifications += 1; - } - } - _ => {} + }, + _ => {}, }; if rand::random::() < 2 { @@ -458,7 +459,7 @@ fn notifications_back_pressure() { loop { match events_stream1.next().await.unwrap() { Event::NotificationStreamOpened { .. } => break, - _ => {} + _ => {}, }; } @@ -483,37 +484,33 @@ fn fallback_name_working() { let listen_addr = config::build_multiaddr![Memory(rand::random::())]; let (node1, mut events_stream1) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: NEW_PROTOCOL_NAME.clone(), - fallback_names: vec![PROTOCOL_NAME], - max_notification_size: 1024 * 1024, - set_config: Default::default() - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: NEW_PROTOCOL_NAME.clone(), + fallback_names: vec![PROTOCOL_NAME], + max_notification_size: 1024 * 1024, + set_config: Default::default(), + }], listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); let (_, mut events_stream2) = build_test_full_node(config::NetworkConfiguration { - extra_sets: vec![ - config::NonDefaultSetConfig { - notifications_protocol: PROTOCOL_NAME, - fallback_names: Vec::new(), - max_notification_size: 1024 * 1024, - set_config: config::SetConfig { - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], - .. Default::default() - } - } - ], + extra_sets: vec![config::NonDefaultSetConfig { + notifications_protocol: PROTOCOL_NAME, + fallback_names: Vec::new(), + max_notification_size: 1024 * 1024, + set_config: config::SetConfig { + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + ..Default::default() + }, + }], listen_addresses: vec![], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() + ..config::NetworkConfiguration::new_local() }); let receiver = async_std::task::spawn(async move { @@ -525,7 +522,7 @@ fn fallback_name_working() { assert_eq!(negotiated_fallback, None); break }, - _ => {} + _ => {}, }; } }); @@ -539,7 +536,7 @@ fn fallback_name_working() { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break }, - _ => {} + _ => {}, }; } @@ -555,7 +552,7 @@ fn ensure_listen_addresses_consistent_with_transport_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -566,7 +563,7 @@ fn ensure_listen_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], - .. 
config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -583,7 +580,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_memory() { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, boot_nodes: vec![boot_node], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -599,7 +596,7 @@ fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], boot_nodes: vec![boot_node], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -617,9 +614,9 @@ fn ensure_reserved_node_addresses_consistent_with_transport_memory() { transport: config::TransportConfig::MemoryOnly, default_peers_set: config::SetConfig { reserved_nodes: vec![reserved_node], - .. Default::default() + ..Default::default() }, - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -636,9 +633,9 @@ fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { listen_addresses: vec![listen_addr.clone()], default_peers_set: config::SetConfig { reserved_nodes: vec![reserved_node], - .. Default::default() + ..Default::default() }, - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -652,7 +649,7 @@ fn ensure_public_addresses_consistent_with_transport_memory() { listen_addresses: vec![listen_addr.clone()], transport: config::TransportConfig::MemoryOnly, public_addresses: vec![public_address], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } @@ -665,6 +662,6 @@ fn ensure_public_addresses_consistent_with_transport_not_memory() { let _ = build_test_full_node(config::NetworkConfiguration { listen_addresses: vec![listen_addr.clone()], public_addresses: vec![public_address], - .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + ..config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) }); } diff --git a/substrate/client/network/src/state_request_handler.rs b/substrate/client/network/src/state_request_handler.rs index d340ff21bd449329f7f2e7e60b6d37b06980c621..a15ee246a2ef8876791334466077509300386231 100644 --- a/substrate/client/network/src/state_request_handler.rs +++ b/substrate/client/network/src/state_request_handler.rs @@ -17,22 +17,27 @@ //! Helper for handling (i.e. answering) state requests from a remote peer via the //! [`crate::request_responses::RequestResponsesBehaviour`]. 
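// Wiring sketch for this handler, mirroring how the tests in this crate set it up
// (the third argument to `new` copies the value used there; illustrative only,
// not part of the patch):
//
//     let (handler, protocol_config) = StateRequestHandler::new(&protocol_id, client.clone(), 50);
//     async_std::task::spawn(handler.run().boxed());
//     // `protocol_config` is then passed to `sc_network::config::Params` as
//     // `state_request_protocol_config`, so incoming requests reach the handler.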
-use codec::{Encode, Decode}; -use crate::chain::Client; -use crate::config::ProtocolId; -use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; -use crate::schema::v1::{StateResponse, StateRequest, StateEntry}; -use crate::{PeerId, ReputationChange}; -use futures::channel::{mpsc, oneshot}; -use futures::stream::StreamExt; +use crate::{ + chain::Client, + config::ProtocolId, + request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema::v1::{StateEntry, StateRequest, StateResponse}, + PeerId, ReputationChange, +}; +use codec::{Decode, Encode}; +use futures::{ + channel::{mpsc, oneshot}, + stream::StreamExt, +}; use log::debug; use lru::LruCache; use prost::Message; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::Block as BlockT; -use std::sync::Arc; -use std::time::Duration; -use std::hash::{Hasher, Hash}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use std::{ + hash::{Hash, Hasher}, + sync::Arc, + time::Duration, +}; const LOG_TARGET: &str = "sync"; const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual response may be bigger. @@ -127,9 +132,7 @@ impl StateRequestHandler { Ok(()) => debug!(target: LOG_TARGET, "Handled state request from {}.", peer), Err(e) => debug!( target: LOG_TARGET, - "Failed to handle state request from {}: {}", - peer, - e, + "Failed to handle state request from {}: {}", peer, e, ), } } @@ -144,11 +147,8 @@ impl StateRequestHandler { let request = StateRequest::decode(&payload[..])?; let block: B::Hash = Decode::decode(&mut request.block.as_ref())?; - let key = SeenRequestsKey { - peer: *peer, - block: block.clone(), - start: request.start.clone(), - }; + let key = + SeenRequestsKey { peer: *peer, block: block.clone(), start: request.start.clone() }; let mut reputation_changes = Vec::new(); @@ -163,7 +163,7 @@ impl StateRequestHandler { }, None => { self.seen_requests.put(key.clone(), SeenRequestsValue::First); - } + }, } log::trace!( @@ -194,7 +194,8 @@ impl StateRequestHandler { &request.start, MAX_RESPONSE_BYTES, )?; - response.entries = entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); + response.entries = + entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); if response.entries.is_empty() { response.complete = true; } @@ -224,11 +225,9 @@ impl StateRequestHandler { Err(()) }; - pending_response.send(OutgoingResponse { - result, - reputation_changes, - sent_feedback: None, - }).map_err(|_| HandleRequestError::SendResponse) + pending_response + .send(OutgoingResponse { result, reputation_changes, sent_feedback: None }) + .map_err(|_| HandleRequestError::SendResponse) } } diff --git a/substrate/client/network/src/transactions.rs b/substrate/client/network/src/transactions.rs index 8a7dd78c834ce875de57906aeab93e5184dca76e..82e7e8fe1714c637f0a3530b22461f509a588b5e 100644 --- a/substrate/client/network/src/transactions.rs +++ b/substrate/client/network/src/transactions.rs @@ -25,26 +25,35 @@ //! configuration as an extra peers set. //! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a //! `Future` that processes transactions. -//!
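// The three steps listed above, condensed into a sketch (the `build` argument list
// is abbreviated and the spawning side is assumed; illustrative, not part of the
// patch):
//
//     let prototype = TransactionsHandlerPrototype::new(protocol_id.clone());
//     network_config.extra_sets.push(prototype.set_config());
//     // ... once the `NetworkService` has been built:
//     let (handler, controller) = prototype.build(service, local_role, pool, registry)?;
//     spawner.spawn("transactions-handler", handler.run().boxed());
//     // `controller` can later toggle gossip and inject hashes to propagate.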
use crate::{ - ExHashT, Event, ObservedRole, - config::{self, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, - error, protocol::message, service::NetworkService, utils::{interval, LruHashSet}, + config::{self, ProtocolId, TransactionImport, TransactionImportFuture, TransactionPool}, + error, + protocol::message, + service::NetworkService, + utils::{interval, LruHashSet}, + Event, ExHashT, ObservedRole, }; use codec::{Decode, Encode}; use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; -use log::{trace, debug, warn}; -use prometheus_endpoint::{ - Registry, Counter, PrometheusError, register, U64 -}; +use log::{debug, trace, warn}; +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sp_runtime::traits::Block as BlockT; -use std::borrow::Cow; -use std::collections::{HashMap, hash_map::Entry}; -use std::sync::{atomic::{AtomicBool, Ordering}, Arc}; -use std::{iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; +use std::{ + borrow::Cow, + collections::{hash_map::Entry, HashMap}, + iter, + num::NonZeroUsize, + pin::Pin, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + task::Poll, + time, +}; /// Interval at which we propagate transactions; const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); @@ -84,10 +93,13 @@ struct Metrics { impl Metrics { fn register(r: &Registry) -> Result { Ok(Metrics { - propagated_transactions: register(Counter::new( - "sync_propagated_transactions", - "Number of transactions propagated to at least one peer", - )?, r)?, + propagated_transactions: register( + Counter::new( + "sync_propagated_transactions", + "Number of transactions propagated to at least one peer", + )?, + r, + )?, }) } } @@ -106,7 +118,7 @@ impl Future for PendingTransaction { let mut this = self.project(); if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)); + return Poll::Ready((this.tx_hash.clone(), import_result)) } Poll::Pending @@ -128,7 +140,7 @@ impl TransactionsHandlerPrototype { proto.push_str(protocol_id.as_ref()); proto.push_str("/transactions/1"); proto - }) + }), } } @@ -143,7 +155,7 @@ impl TransactionsHandlerPrototype { out_peers: 0, reserved_nodes: Vec::new(), non_reserved_mode: config::NonReservedPeerMode::Deny, - } + }, } } @@ -182,10 +194,7 @@ impl TransactionsHandlerPrototype { }, }; - let controller = TransactionsHandlerController { - to_handler, - gossip_enabled, - }; + let controller = TransactionsHandlerController { to_handler, gossip_enabled }; Ok((handler, controller)) } @@ -264,7 +273,7 @@ impl TransactionsHandler { /// interrupted. pub async fn run(mut self) { loop { - futures::select!{ + futures::select! { _ = self.propagate_timeout.next().fuse() => { self.propagate_transactions(); }, @@ -301,7 +310,7 @@ impl TransactionsHandler { .collect::(); let result = self.service.add_peers_to_reserved_set( self.protocol_name.clone(), - iter::once(addr).collect() + iter::once(addr).collect(), ); if let Err(err) = result { log::error!(target: "sync", "Add reserved peer failed: {}", err); @@ -312,22 +321,30 @@ impl TransactionsHandler { .collect::(); let result = self.service.remove_peers_from_reserved_set( self.protocol_name.clone(), - iter::once(addr).collect() + iter::once(addr).collect(), ); if let Err(err) = result { log::error!(target: "sync", "Removing reserved peer failed: {}", err); } }, - Event::NotificationStreamOpened { remote, protocol, role, .. 
} if protocol == self.protocol_name => { - let _was_in = self.peers.insert(remote, Peer { - known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) - .expect("Constant is nonzero")), - role, - }); + Event::NotificationStreamOpened { remote, protocol, role, .. } + if protocol == self.protocol_name => + { + let _was_in = self.peers.insert( + remote, + Peer { + known_transactions: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS).expect("Constant is nonzero"), + ), + role, + }, + ); debug_assert!(_was_in.is_none()); } - Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { + Event::NotificationStreamClosed { remote, protocol } + if protocol == self.protocol_name => + { let _peer = self.peers.remove(&remote); debug_assert!(_peer.is_some()); } @@ -335,7 +352,7 @@ impl TransactionsHandler { Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { if protocol != self.protocol_name { - continue; + continue } if let Ok(m) = as Decode>::decode( @@ -349,28 +366,24 @@ impl TransactionsHandler { }, // Not our concern. - Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {} + Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {}, } } /// Called when peer sends us new transactions - fn on_transactions( - &mut self, - who: PeerId, - transactions: message::Transactions, - ) { + fn on_transactions(&mut self, who: PeerId, transactions: message::Transactions) { // sending transaction to light node is considered a bad behavior if matches!(self.local_role, config::Role::Light) { debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who); self.service.disconnect_peer(who, self.protocol_name.clone()); self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); - return; + return } // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { trace!(target: "sync", "{} Ignoring transactions while disabled", who); - return; + return } trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); @@ -382,7 +395,7 @@ impl TransactionsHandler { "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", MAX_PENDING_TRANSACTIONS, ); - break; + break } let hash = self.transaction_pool.hash_of(&t); @@ -400,7 +413,7 @@ impl TransactionsHandler { }, Entry::Occupied(mut entry) => { entry.get_mut().push(who.clone()); - } + }, } } } @@ -408,7 +421,8 @@ impl TransactionsHandler { fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { match import { - TransactionImport::KnownGood => self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::KnownGood => + self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), TransactionImport::Bad => self.service.report_peer(who, rep::BAD_TRANSACTION), TransactionImport::None => {}, @@ -416,14 +430,11 @@ impl TransactionsHandler { } /// Propagate one transaction. 
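// (Propagation below is de-duplicated per peer: a hash is only sent when
// `known_transactions.insert(..)` reports it as new for that peer, and peers
// with `ObservedRole::Light` are skipped entirely.)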
- pub fn propagate_transaction( - &mut self, - hash: &H, - ) { + pub fn propagate_transaction(&mut self, hash: &H) { debug!(target: "sync", "Propagating transaction [{:?}]", hash); // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { - return; + return } if let Some(transaction) = self.transaction_pool.transaction(hash) { let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); @@ -441,7 +452,7 @@ impl TransactionsHandler { for (who, peer) in self.peers.iter_mut() { // never send transactions to the light node if matches!(peer.role, ObservedRole::Light) { - continue; + continue } let (hashes, to_send): (Vec<_>, Vec<_>) = transactions @@ -454,16 +465,13 @@ impl TransactionsHandler { if !to_send.is_empty() { for hash in hashes { - propagated_to - .entry(hash) - .or_default() - .push(who.to_base58()); + propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); self.service.write_notification( who.clone(), self.protocol_name.clone(), - to_send.encode() + to_send.encode(), ); } } @@ -479,7 +487,7 @@ impl TransactionsHandler { fn propagate_transactions(&mut self) { // Accept transactions only when enabled if !self.gossip_enabled.load(Ordering::Relaxed) { - return; + return } debug!(target: "sync", "Propagating transactions"); let transactions = self.transaction_pool.transactions(); diff --git a/substrate/client/network/src/transport.rs b/substrate/client/network/src/transport.rs index ab587e01a875bd8f1e812a8464a448174822ef2e..710d4775993b0d74a0762e27de6b39dd5f67d557 100644 --- a/substrate/client/network/src/transport.rs +++ b/substrate/client/network/src/transport.rs @@ -17,15 +17,18 @@ // along with this program. If not, see . use libp2p::{ - PeerId, Transport, + bandwidth, core::{ - self, either::EitherTransport, muxing::StreamMuxerBox, - transport::{Boxed, OptionalTransport}, upgrade + self, + either::EitherTransport, + muxing::StreamMuxerBox, + transport::{Boxed, OptionalTransport}, + upgrade, }, - mplex, identity, bandwidth, wasm_ext, noise + identity, mplex, noise, wasm_ext, PeerId, Transport, }; #[cfg(not(target_os = "unknown"))] -use libp2p::{tcp, dns, websocket}; +use libp2p::{dns, tcp, websocket}; use std::{sync::Arc, time::Duration}; pub use self::bandwidth::BandwidthSinks; @@ -61,8 +64,8 @@ pub fn build_transport( #[cfg(not(target_os = "unknown"))] let transport = transport.or_transport(if !memory_only { let desktop_trans = tcp::TcpConfig::new().nodelay(true); - let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) - .or_transport(desktop_trans); + let desktop_trans = + websocket::WsConfig::new(desktop_trans.clone()).or_transport(desktop_trans); let dns_init = futures::executor::block_on(dns::DnsConfig::system(desktop_trans.clone())); OptionalTransport::some(if let Ok(dns) = dns_init { EitherTransport::Left(dns) @@ -81,23 +84,24 @@ pub fn build_transport( let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); - let authentication_config = { - // For more information about these two panics, see in "On the Importance of - // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, - // and Richard J. Lipton. - let noise_keypair = noise::Keypair::::new().into_authentic(&keypair) + let authentication_config = + { + // For more information about these two panics, see in "On the Importance of + // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. 
DeMillo, + // and Richard J. Lipton. + let noise_keypair = noise::Keypair::::new().into_authentic(&keypair) .expect("can only fail in case of a hardware bug; since this signing is performed only \ once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); - // Legacy noise configurations for backward compatibility. - let mut noise_legacy = noise::LegacyConfig::default(); - noise_legacy.recv_legacy_handshake = true; + // Legacy noise configurations for backward compatibility. + let mut noise_legacy = noise::LegacyConfig::default(); + noise_legacy.recv_legacy_handshake = true; - let mut xx_config = noise::NoiseConfig::xx(noise_keypair); - xx_config.set_legacy_config(noise_legacy.clone()); - xx_config.into_authenticated() - }; + let mut xx_config = noise::NoiseConfig::xx(noise_keypair); + xx_config.set_legacy_config(noise_legacy.clone()); + xx_config.into_authenticated() + }; let multiplexing_config = { let mut mplex_config = mplex::MplexConfig::new(); @@ -117,7 +121,8 @@ pub fn build_transport( core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) }; - let transport = transport.upgrade(upgrade::Version::V1Lazy) + let transport = transport + .upgrade(upgrade::Version::V1Lazy) .authenticate(authentication_config) .multiplex(multiplexing_config) .timeout(Duration::from_secs(20)) diff --git a/substrate/client/network/src/utils.rs b/substrate/client/network/src/utils.rs index 02673ef49fb4c4d9d98a3b0181af8e66859e7159..b23b7e0c101e0bf25bda4d94072cea80e6711f23 100644 --- a/substrate/client/network/src/utils.rs +++ b/substrate/client/network/src/utils.rs @@ -19,8 +19,7 @@ use futures::{stream::unfold, FutureExt, Stream, StreamExt}; use futures_timer::Delay; use linked_hash_set::LinkedHashSet; -use std::time::Duration; -use std::{hash::Hash, num::NonZeroUsize}; +use std::{hash::Hash, num::NonZeroUsize, time::Duration}; /// Creates a stream that returns a new value every `duration`. pub fn interval(duration: Duration) -> impl Stream + Unpin { @@ -39,10 +38,7 @@ pub struct LruHashSet { impl LruHashSet { /// Create a new `LruHashSet` with the given (exclusive) limit. pub fn new(limit: NonZeroUsize) -> Self { - Self { - set: LinkedHashSet::new(), - limit, - } + Self { set: LinkedHashSet::new(), limit } } /// Insert element into the set. @@ -55,7 +51,7 @@ impl LruHashSet { if self.set.len() == usize::from(self.limit) { self.set.pop_front(); // remove oldest entry } - return true; + return true } false } diff --git a/substrate/client/network/test/src/block_import.rs b/substrate/client/network/test/src/block_import.rs index 6d3ceb4a933d8bf086c6ab833f96b045fe85a44d..4593e06250d369642bffab7dd3d6102d536cf812 100644 --- a/substrate/client/network/test/src/block_import.rs +++ b/substrate/client/network/test/src/block_import.rs @@ -18,16 +18,21 @@ //! Testing block import logic. 
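// The tests below share one pattern: take the `IncomingBlock` produced by
// `prepare_good_block`, feed it to `import_single_block`, and match on the
// result. Condensed sketch (the block is already known to the returned client,
// hence `ImportedKnown`):
//
//     let (mut client, _hash, number, _peer_id, block) = prepare_good_block();
//     let result = block_on(import_single_block(
//         &mut client,
//         BlockOrigin::File,
//         block,
//         &mut PassThroughVerifier::new(true),
//     ));
//     assert!(matches!(result, Ok(BlockImportResult::ImportedKnown(n, _)) if n == number));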
-use sp_consensus::ImportedAux; -use sp_consensus::import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, +use super::*; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderProvider; +use sp_consensus::{ + import_queue::{ + import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, + }, + ImportedAux, }; -use substrate_test_runtime_client::{self, prelude::*}; -use substrate_test_runtime_client::runtime::{Block, Hash}; use sp_runtime::generic::BlockId; -use sc_block_builder::BlockBuilderProvider; -use futures::executor::block_on; -use super::*; +use substrate_test_runtime_client::{ + self, + prelude::*, + runtime::{Block, Hash}, +}; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { let mut client = substrate_test_runtime_client::new(); @@ -38,18 +43,24 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) let header = client.header(&BlockId::Number(1)).unwrap(); let justifications = client.justifications(&BlockId::Number(1)).unwrap(); let peer_id = PeerId::random(); - (client, hash, number, peer_id.clone(), IncomingBlock { + ( + client, hash, - header, - body: Some(Vec::new()), - indexed_body: None, - justifications, - origin: Some(peer_id.clone()), - allow_missing_state: false, - import_existing: false, - state: None, - skip_execution: false, - }) + number, + peer_id.clone(), + IncomingBlock { + hash, + header, + body: Some(Vec::new()), + indexed_body: None, + justifications, + origin: Some(peer_id.clone()), + allow_missing_state: false, + import_existing: false, + state: None, + skip_execution: false, + }, + ) } #[test] @@ -63,11 +74,11 @@ fn import_single_good_block_works() { &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) + &mut PassThroughVerifier::new(true), )) { Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) - if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} - r @ _ => panic!("{:?}", r) + if *num == number && *aux == expected_aux && *org == Some(peer_id) => {}, + r @ _ => panic!("{:?}", r), } } @@ -78,10 +89,10 @@ fn import_single_good_known_block_is_ignored() { &mut client, BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) + &mut PassThroughVerifier::new(true), )) { - Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {} - _ => panic!() + Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {}, + _ => panic!(), } } @@ -93,10 +104,10 @@ fn import_single_good_block_without_header_fails() { &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, - &mut PassThroughVerifier::new(true) + &mut PassThroughVerifier::new(true), )) { - Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} - _ => panic!() + Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {}, + _ => panic!(), } } diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 900e05e26a78f0559f519d7efc80915f21958c09..0bdaa0d14e4fb68e371f745bdfbb7b2b5f45b4fb 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -23,52 +23,58 @@ mod block_import; mod sync; use std::{ - borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, task::{Poll, Context as FutureContext} + borrow::Cow, + collections::HashMap, + pin::Pin, + sync::Arc, + task::{Context as FutureContext, Poll}, }; -use libp2p::build_multiaddr; 
+use futures::{future::BoxFuture, prelude::*}; +use libp2p::{build_multiaddr, PeerId}; use log::trace; -use sc_network::block_request_handler::{self, BlockRequestHandler}; -use sc_network::state_request_handler::{self, StateRequestHandler}; -use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; -use sp_blockchain::{ - HeaderBackend, Result as ClientResult, - well_known_cache_keys::{self, Id as CacheKeyId}, - Info as BlockchainInfo, -}; +use parking_lot::Mutex; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{ - BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, - backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend, + backend::{AuxStore, Backend, Finalizer, TransactionFor}, + BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, + FinalityNotifications, ImportNotifications, }; use sc_consensus::LongestChain; -use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sc_network::config::Role; -use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; -use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, -}; -use sp_consensus::block_import::{BlockImport, ImportResult}; -use sp_consensus::Error as ConsensusError; -use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; -use futures::prelude::*; -use futures::future::BoxFuture; +pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - NetworkWorker, NetworkService, config::{ProtocolId, MultiaddrWithPeerId, NonReservedPeerMode}, - Multiaddr, + block_request_handler::{self, BlockRequestHandler}, + config::{ + MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, + ProtocolConfig, ProtocolId, Role, SyncMode, TransportConfig, + }, + light_client_requests::{self, handler::LightClientRequestHandler}, + state_request_handler::{self, StateRequestHandler}, + Multiaddr, NetworkService, NetworkWorker, +}; +use sc_service::client::Client; +use sp_blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + HeaderBackend, Info as BlockchainInfo, Result as ClientResult, +}; +use sp_consensus::{ + block_import::{BlockImport, ImportResult}, + block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, + import_queue::{BasicQueue, BoxJustificationImport, Verifier}, + BlockCheckParams, BlockImportParams, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, + JustificationImport, }; -use sc_network::config::{NetworkConfiguration, NonDefaultSetConfig, TransportConfig, SyncMode}; -use libp2p::PeerId; -use parking_lot::Mutex; use sp_core::H256; -use sc_network::config::ProtocolConfig; -use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::{Justification, Justifications}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justification, Justifications, +}; use substrate_test_runtime_client::AccountKeyring; -use sc_service::client::Client; -pub use sc_network::config::EmptyTransactionPool; -pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; -pub use substrate_test_runtime_client::{TestClient, TestClientBuilder, TestClientBuilderExt}; +pub use substrate_test_runtime_client::{ + runtime::{Block, Extrinsic, Hash, Transfer}, + 
TestClient, TestClientBuilder, TestClientBuilderExt, +}; type AuthorityId = sp_consensus_babe::AuthorityId; @@ -85,10 +91,7 @@ impl PassThroughVerifier { /// /// Every verified block will use `finalized` for the `BlockImportParams`. pub fn new(finalized: bool) -> Self { - Self { - finalized, - fork_choice: ForkChoiceStrategy::LongestChain, - } + Self { finalized, fork_choice: ForkChoiceStrategy::LongestChain } } /// Create a new instance. @@ -96,10 +99,7 @@ impl PassThroughVerifier { /// Every verified block will use `finalized` for the `BlockImportParams` and /// the given [`ForkChoiceStrategy`]. pub fn new_with_fork_choice(finalized: bool, fork_choice: ForkChoiceStrategy) -> Self { - Self { - finalized, - fork_choice, - } + Self { finalized, fork_choice } } } @@ -111,12 +111,14 @@ impl Verifier for PassThroughVerifier { origin: BlockOrigin, header: B::Header, justifications: Option, - body: Option> + body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { - let maybe_keys = header.digest() - .log(|l| l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) - ) + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) + }) .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); let mut import = BlockImportParams::new(origin, header); import.body = body; @@ -132,13 +134,13 @@ pub type PeersFullClient = Client< substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, Block, - substrate_test_runtime_client::runtime::RuntimeApi + substrate_test_runtime_client::runtime::RuntimeApi, >; pub type PeersLightClient = Client< substrate_test_runtime_client::LightBackend, substrate_test_runtime_client::LightExecutor, Block, - substrate_test_runtime_client::runtime::RuntimeApi + substrate_test_runtime_client::runtime::RuntimeApi, >; #[derive(Clone)] @@ -173,7 +175,10 @@ impl PeersClient { } } - pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { + pub fn header( + &self, + block: &BlockId, + ) -> ClientResult::Header>> { match *self { PeersClient::Full(ref client, ref _backend) => client.header(block), PeersClient::Light(ref client, ref _backend) => client.header(block), @@ -207,7 +212,7 @@ impl PeersClient { } } - pub fn import_notification_stream(&self) -> ImportNotifications{ + pub fn import_notification_stream(&self) -> ImportNotifications { match *self { PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), @@ -218,11 +223,13 @@ impl PeersClient { &self, id: BlockId, justification: Option, - notify: bool + notify: bool, ) -> ClientResult<()> { match *self { - PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), + PeersClient::Full(ref client, ref _backend) => + client.finalize_block(id, justification, notify), + PeersClient::Light(ref client, ref _backend) => + client.finalize_block(id, justification, notify), } } } @@ -273,7 +280,8 @@ pub struct Peer { listen_addr: Multiaddr, } -impl Peer where +impl Peer +where B: BlockImport + Send + Sync, B::Transaction: Send, { @@ -288,7 +296,9 @@ impl Peer where } // Returns a clone of the local SelectChain, only available on full nodes - pub fn 
select_chain(&self) -> Option> { + pub fn select_chain( + &self, + ) -> Option> { self.select_chain.clone() } @@ -328,17 +338,22 @@ impl Peer where } /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks( - &mut self, - count: usize, - origin: BlockOrigin, - edit_block: F, - ) -> H256 - where - F: FnMut(BlockBuilder) -> Block + pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + where + F: FnMut( + BlockBuilder, + ) -> Block, { let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false, true, true) + self.generate_blocks_at( + BlockId::Hash(best_hash), + count, + origin, + edit_block, + false, + true, + true, + ) } /// Add blocks to the peer -- edit the block before adding. The chain will @@ -352,16 +367,18 @@ impl Peer where headers_only: bool, inform_sync_about_new_best_block: bool, announce_block: bool, - ) -> H256 where F: FnMut(BlockBuilder) -> Block { - let full_client = self.client.as_full() - .expect("blocks could only be generated by full clients"); + ) -> H256 + where + F: FnMut( + BlockBuilder, + ) -> Block, + { + let full_client = + self.client.as_full().expect("blocks could only be generated by full clients"); let mut at = full_client.header(&at).unwrap().unwrap().hash(); - for _ in 0..count { - let builder = full_client.new_block_at( - &BlockId::Hash(at), - Default::default(), - false, - ).unwrap(); + for _ in 0..count { + let builder = + full_client.new_block_at(&BlockId::Hash(at), Default::default(), false).unwrap(); let block = edit_block(builder); let hash = block.header.hash(); trace!( @@ -377,16 +394,16 @@ impl Peer where header.clone(), None, if headers_only { None } else { Some(block.extrinsics) }, - )).unwrap(); + )) + .unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() } else { Default::default() }; - futures::executor::block_on( - self.block_import.import_block(import_block, cache) - ).expect("block_import failed"); + futures::executor::block_on(self.block_import.import_block(import_block, cache)) + .expect("block_import failed"); if announce_block { self.network.service().announce_block(hash, None); } @@ -458,7 +475,8 @@ impl Peer where self.generate_blocks_at( at, count, - BlockOrigin::File, |mut builder| { + BlockOrigin::File, + |mut builder| { let transfer = Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Alice.into(), @@ -520,9 +538,10 @@ impl Peer where /// Count the total number of imported blocks. 
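// Typical use of the block-building helpers above in a test: the closure receives
// a `BlockBuilder` for each new block and must return the built block (sketch,
// mirroring the transfer-pushing closure used further down):
//
//     let best = peer.generate_blocks(3, BlockOrigin::File, |builder| {
//         // push extrinsics here if the test needs them
//         builder.build().unwrap().block
//     });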
pub fn blocks_count(&self) -> u64 { - self.backend.as_ref().map( - |backend| backend.blockchain().info().best_number - ).unwrap_or(0) + self.backend + .as_ref() + .map(|backend| backend.blockchain().info().best_number) + .unwrap_or(0) } /// Return a collection of block hashes that failed verification @@ -531,9 +550,10 @@ impl Peer where } pub fn has_block(&self, hash: &H256) -> bool { - self.backend.as_ref().map( - |backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some() - ).unwrap_or(false) + self.backend + .as_ref() + .map(|backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some()) + .unwrap_or(false) } } @@ -542,22 +562,22 @@ pub trait BlockImportAdapterFull: Block, Transaction = TransactionFor, Error = ConsensusError, - > + - Send + - Sync + - Clone -{} + > + Send + + Sync + + Clone +{ +} impl BlockImportAdapterFull for T where T: BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError, - > + - Send + - Sync + - Clone -{} + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + Send + + Sync + + Clone +{ +} /// Implements `BlockImport` for any `Transaction`. Internally the transaction is /// "converted", aka the field is set to `None`. @@ -572,14 +592,13 @@ pub struct BlockImportAdapter { impl BlockImportAdapter { /// Create a new instance of `Self::Full`. pub fn new(inner: I) -> Self { - Self { - inner, - } + Self { inner } } } #[async_trait::async_trait] -impl BlockImport for BlockImportAdapter where +impl BlockImport for BlockImportAdapter +where I: BlockImport + Send + Sync, I::Transaction: Send, { @@ -615,13 +634,18 @@ impl Verifier for VerifierAdapter { origin: BlockOrigin, header: B::Header, justifications: Option, - body: Option> + body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { let hash = header.hash(); - self.verifier.lock().await.verify(origin, header, justifications, body).await.map_err(|e| { - self.failed_verifications.lock().insert(hash, e.clone()); - e - }) + self.verifier + .lock() + .await + .verify(origin, header, justifications, body) + .await + .map_err(|e| { + self.failed_verifications.lock().insert(hash, e.clone()); + e + }) } } @@ -664,7 +688,10 @@ pub struct FullPeerConfig { pub storage_chain: bool, } -pub trait TestNetFactory: Sized where >::Transaction: Send { +pub trait TestNetFactory: Sized +where + >::Transaction: Send, +{ type Verifier: 'static + Verifier; type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; @@ -687,12 +714,14 @@ pub trait TestNetFactory: Sized where >: ); /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ); + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ); fn default_config() -> ProtocolConfig { ProtocolConfig::default() @@ -723,18 +752,15 @@ pub trait TestNetFactory: Sized where >: (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), (None, false) => TestClientBuilder::with_default_backend(), }; - if matches!(config.sync_mode, SyncMode::Fast{..}) { + if matches!(config.sync_mode, SyncMode::Fast { .. 
}) { test_client_builder = test_client_builder.set_no_genesis(); } let backend = test_client_builder.backend(); let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); - let ( - block_import, - justification_import, - data, - ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + let (block_import, justification_import, data) = + self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); let verifier = self.make_verifier( PeersClient::Full(client.clone(), backend.clone()), @@ -753,30 +779,31 @@ pub trait TestNetFactory: Sized where >: let listen_addr = build_multiaddr![Memory(rand::random::())]; - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); + let mut network_config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); network_config.sync_mode = config.sync_mode; network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; - network_config.extra_sets = config.notifications_protocols.into_iter().map(|p| { - NonDefaultSetConfig { + network_config.extra_sets = config + .notifications_protocols + .into_iter() + .map(|p| NonDefaultSetConfig { notifications_protocol: p, fallback_names: Vec::new(), max_notification_size: 1024 * 1024, - set_config: Default::default() - } - }).collect(); + set_config: Default::default(), + }) + .collect(); if let Some(connect_to) = config.connect_to_peers { - let addrs = connect_to.iter().map(|v| { - let peer_id = self.peer(*v).network_service().local_peer_id().clone(); - let multiaddr = self.peer(*v).listen_addr.clone(); - MultiaddrWithPeerId { peer_id, multiaddr } - }).collect(); + let addrs = connect_to + .iter() + .map(|v| { + let peer_id = self.peer(*v).network_service().local_peer_id().clone(); + let multiaddr = self.peer(*v).listen_addr.clone(); + MultiaddrWithPeerId { peer_id, multiaddr } + }) + .collect(); network_config.default_peers_set.reserved_nodes = addrs; network_config.default_peers_set.non_reserved_mode = NonReservedPeerMode::Deny; } @@ -784,27 +811,22 @@ pub trait TestNetFactory: Sized where >: let protocol_id = ProtocolId::from("test-protocol-name"); let block_request_protocol_config = { - let (handler, protocol_config) = BlockRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); + let (handler, protocol_config) = + BlockRequestHandler::new(&protocol_id, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let state_request_protocol_config = { - let (handler, protocol_config) = StateRequestHandler::new( - &protocol_id, - client.clone(), - 50, - ); + let (handler, protocol_config) = + StateRequestHandler::new(&protocol_id, client.clone(), 50); self.spawn_task(handler.run().boxed()); protocol_config }; let light_client_request_protocol_config = { - let (handler, protocol_config) = LightClientRequestHandler::new(&protocol_id, client.clone()); + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); self.spawn_task(handler.run().boxed()); protocol_config }; @@ -812,20 +834,24 @@ pub trait TestNetFactory: Sized where >: let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, - transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), + transactions_handler_executor: 
Box::new(|task| { + async_std::task::spawn(task); + }), network_config, chain: client.clone(), on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, import_queue, - block_announce_validator: config.block_announce_validator + block_announce_validator: config + .block_announce_validator .unwrap_or_else(|| Box::new(DefaultBlockAnnounceValidator)), metrics_registry: None, block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - }).unwrap(); + }) + .unwrap(); trace!(target: "test_network", "Peer identifier: {}", network.service().local_peer_id()); @@ -838,7 +864,8 @@ pub trait TestNetFactory: Sized where >: } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, @@ -859,11 +886,8 @@ pub trait TestNetFactory: Sized where >: fn add_light_peer(&mut self) { let (c, backend) = substrate_test_runtime_client::new_light(); let client = Arc::new(c); - let ( - block_import, - justification_import, - data, - ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); + let (block_import, justification_import, data) = + self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); let verifier = self.make_verifier( PeersClient::Light(client.clone(), backend.clone()), @@ -882,24 +906,18 @@ pub trait TestNetFactory: Sized where >: let listen_addr = build_multiaddr![Memory(rand::random::())]; - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); + let mut network_config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); network_config.transport = TransportConfig::MemoryOnly; network_config.listen_addresses = vec![listen_addr.clone()]; network_config.allow_non_globals_in_dht = true; let protocol_id = ProtocolId::from("test-protocol-name"); - let block_request_protocol_config = block_request_handler::generate_protocol_config( - &protocol_id, - ); - let state_request_protocol_config = state_request_handler::generate_protocol_config( - &protocol_id, - ); + let block_request_protocol_config = + block_request_handler::generate_protocol_config(&protocol_id); + let state_request_protocol_config = + state_request_handler::generate_protocol_config(&protocol_id); let light_client_request_protocol_config = light_client_requests::generate_protocol_config(&protocol_id); @@ -907,7 +925,9 @@ pub trait TestNetFactory: Sized where >: let network = NetworkWorker::new(sc_network::config::Params { role: Role::Light, executor: None, - transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), + transactions_handler_executor: Box::new(|task| { + async_std::task::spawn(task); + }), network_config, chain: client.clone(), on_demand: None, @@ -919,15 +939,20 @@ pub trait TestNetFactory: Sized where >: block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - }).unwrap(); + }) + .unwrap(); self.mut_peers(|peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); 
- let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, @@ -967,7 +992,7 @@ pub trait TestNetFactory: Sized where >: match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Poll::Pending + (Some(_), _) => return Poll::Pending, } } Poll::Ready(()) @@ -1008,23 +1033,27 @@ pub trait TestNetFactory: Sized where >: /// /// Calls `poll_until_sync` repeatedly. fn block_until_sync(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_sync(cx) + })); } /// Blocks the current thread until there are no pending packets. /// /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. fn block_until_idle(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_idle(cx) + })); } /// Blocks the current thread until all peers are connected to each other. /// /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. fn block_until_connected(&mut self) { - futures::executor::block_on( - futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)), - ); + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_connected(cx) + })); } /// Polls the testnet. Processes all the pending actions. @@ -1038,13 +1067,17 @@ pub trait TestNetFactory: Sized where >: trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id()); // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { + while let Poll::Ready(Some(notification)) = + peer.imported_blocks_stream.as_mut().poll_next(cx) + { peer.network.service().announce_block(notification.hash, None); } // We poll `finality_notification_stream`, but we only take the last event. let mut last = None; - while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { + while let Poll::Ready(Some(item)) = + peer.finality_notification_stream.as_mut().poll_next(cx) + { last = Some(item); } if let Some(notification) = last { @@ -1063,10 +1096,7 @@ pub struct TestNet { impl TestNet { /// Create a `TestNet` that used the given fork choice rule. pub fn with_fork_choice(fork_choice: ForkChoiceStrategy) -> Self { - Self { - peers: Vec::new(), - fork_choice, - } + Self { peers: Vec::new(), fork_choice } } } @@ -1077,25 +1107,26 @@ impl TestNetFactory for TestNet { /// Create new test network with peers and given config. 
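The `block_until_sync` / `block_until_idle` / `block_until_connected` helpers above all follow one idiom: wrap a manual poll function in `poll_fn` and drive it to completion with the executor's `block_on`. A self-contained sketch of that idiom, assuming only the `futures` crate (`block_until` is an illustrative name):

use futures::executor::block_on;
use futures::future::poll_fn;
use std::task::Poll;

fn block_until(mut condition: impl FnMut() -> bool) {
    block_on(poll_fn::<(), _>(|cx| {
        if condition() {
            Poll::Ready(())
        } else {
            // A real implementation registers a proper wakeup; self-waking
            // like this simply busy-polls the condition.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }));
}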
fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { - peers: Vec::new(), - fork_choice: ForkChoiceStrategy::LongestChain, - } + TestNet { peers: Vec::new(), fork_choice: ForkChoiceStrategy::LongestChain } } - fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { + fn make_verifier( + &self, + _client: PeersClient, + _config: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) } - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) - { + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { (client.as_block_import(), None, ()) } @@ -1128,7 +1159,8 @@ impl JustificationImport for ForceFinalized { _number: NumberFor, justification: Justification, ) -> Result<(), Self::Error> { - self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) + self.0 + .finalize_block(BlockId::Hash(hash), Some(justification), true) .map_err(|_| ConsensusError::InvalidJustification.into()) } } @@ -1144,7 +1176,12 @@ impl TestNetFactory for JustificationTestNet { JustificationTestNet(TestNet::from_config(config)) } - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &(), + ) -> Self::Verifier { self.0.make_verifier(client, config, peer_data) } @@ -1156,23 +1193,21 @@ impl TestNetFactory for JustificationTestNet { self.0.peers() } - fn mut_peers>, - )>(&mut self, closure: F) { + fn mut_peers>)>( + &mut self, + closure: F, + ) { self.0.mut_peers(closure) } - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Self::PeerData, - ) - { - ( - client.as_block_import(), - Some(Box::new(ForceFinalized(client))), - Default::default(), - ) + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), Some(Box::new(ForceFinalized(client))), Default::default()) } } diff --git a/substrate/client/network/test/src/sync.rs b/substrate/client/network/test/src/sync.rs index f998c9ebde757916bbef742af6b87cee10fae24b..153a0f905bff792653dedb1a250bfaf639e1bf75 100644 --- a/substrate/client/network/test/src/sync.rs +++ b/substrate/client/network/test/src/sync.rs @@ -16,13 +16,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use sp_consensus::BlockOrigin; -use std::time::Duration; -use futures::{Future, executor::block_on}; use super::*; -use sp_consensus::block_validation::Validation; -use substrate_test_runtime::Header; +use futures::{executor::block_on, Future}; +use sp_consensus::{block_validation::Validation, BlockOrigin}; use sp_runtime::Justifications; +use std::time::Duration; +use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { sp_tracing::try_init_simple(); @@ -254,9 +253,18 @@ fn sync_justifications() { // we finalize block #10, #15 and #20 for peer 0 with a justification let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(BlockId::Number(10), Some(just.clone()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(15), Some(just.clone()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(20), Some(just.clone()), true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(15), Some(just.clone()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(20), Some(just.clone()), true) + .unwrap(); let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); @@ -271,21 +279,15 @@ fn sync_justifications() { net.poll(cx); for height in (10..21).step_by(5) { - if net - .peer(0) - .client() - .justifications(&BlockId::Number(height)) - .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(&BlockId::Number(height)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } - if net - .peer(1) - .client() - .justifications(&BlockId::Number(height)) - .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(&BlockId::Number(height)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } } @@ -308,7 +310,10 @@ fn sync_justifications_across_forks() { net.block_until_sync(); let just = (*b"FRNK", Vec::new()); - net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(just), true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(f1_best), Some(just), true) + .unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); @@ -316,16 +321,10 @@ fn sync_justifications_across_forks() { block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net - .peer(0) - .client() - .justifications(&BlockId::Number(10)) - .unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) - && net - .peer(1) - .client() - .justifications(&BlockId::Number(10)) - .unwrap() == Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(0).client().justifications(&BlockId::Number(10)).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) && + net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() == + Some(Justifications::from((*b"FRNK", Vec::new()))) { Poll::Ready(()) } else { @@ -380,7 +379,8 @@ fn own_blocks_are_announced() { sp_tracing::try_init_simple(); let mut net = TestNet::new(3); net.block_until_sync(); // connect'em - net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); + net.peer(0) + .generate_blocks(1, BlockOrigin::Own, |builder| 
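The justification assertions above compare against `(*b"FRNK", Vec::new())`: a justification is keyed by a `ConsensusEngineId`, which in sp-runtime is a `[u8; 4]`, so dereferencing the byte-string literal yields GRANDPA's 4-byte engine id. A standalone illustration (the alias is redeclared locally rather than imported):

// `ConsensusEngineId` mirrors the sp-runtime definition: a 4-byte tag.
type ConsensusEngineId = [u8; 4];

fn main() {
    // The GRANDPA engine id the sync tests above finalize with.
    let just: (ConsensusEngineId, Vec<u8>) = (*b"FRNK", Vec::new());
    assert_eq!(&just.0, b"FRNK");
}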
builder.build().unwrap().block); net.block_until_sync(); @@ -573,7 +573,7 @@ fn can_sync_explicit_forks() { // poll until the two nodes connect, otherwise announcing the block will not work block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { Poll::Pending } else { Poll::Ready(()) @@ -658,7 +658,7 @@ fn full_sync_requires_block_body() { // Wait for nodes to connect block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { Poll::Pending } else { Poll::Ready(()) @@ -718,8 +718,14 @@ fn can_sync_to_peers_with_wrong_common_block() { // both peers re-org to the same fork without notifying each other let just = Some((*b"FRNK", Vec::new())); - net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), just.clone(), true).unwrap(); - net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), just, true).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Hash(fork_hash), just.clone(), true) + .unwrap(); + net.peer(1) + .client() + .finalize_block(BlockId::Hash(fork_hash), just, true) + .unwrap(); let final_hash = net.peer(0).push_blocks(1, false); net.block_until_sync(); @@ -735,7 +741,8 @@ impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { &mut self, _: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin>> + Send>> + { async { Ok(Validation::Success { is_new_best: true }) }.boxed() } } @@ -748,16 +755,18 @@ impl BlockAnnounceValidator for FailingBlockAnnounceValidator { &mut self, header: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin>> + Send>> + { let number = *header.number(); let target_number = self.0; - async move { Ok( - if number == target_number { + async move { + Ok(if number == target_number { Validation::Failure { disconnect: false } } else { Validation::Success { is_new_best: true } - } - ) }.boxed() + }) + } + .boxed() } } @@ -794,11 +803,13 @@ impl BlockAnnounceValidator for DeferredBlockAnnounceValidator { &mut self, _: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin>> + Send>> + { async { futures_timer::Delay::new(std::time::Duration::from_millis(500)).await; Ok(Validation::Success { is_new_best: false }) - }.boxed() + } + .boxed() } } @@ -863,17 +874,12 @@ fn sync_to_tip_when_we_sync_together_with_multiple_peers() { let mut net = TestNet::new(3); - let block_hash = net.peer(0).push_blocks_at_without_informing_sync( - BlockId::Number(0), - 10_000, - false, - ); + let block_hash = + net.peer(0) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 10_000, false); - net.peer(1).push_blocks_at_without_informing_sync( - BlockId::Number(0), - 5_000, - false, - ); + net.peer(1) + .push_blocks_at_without_informing_sync(BlockId::Number(0), 5_000, false); net.block_until_connected(); net.block_until_idle(); @@ -897,7 +903,9 @@ fn block_announce_data_is_propagated() { &mut self, _: &Header, data: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin< + Box>> + Send>, + > { let correct = data.get(0) == Some(&137); async move { if correct { @@ -905,7 +913,8 @@ fn block_announce_data_is_propagated() { } else { Ok(Validation::Failure { disconnect: false }) } - }.boxed() + } + .boxed() } } @@ -950,15 +959,19 @@ fn continue_to_sync_after_some_block_announcement_verifications_failed() { &mut self, header: &Header, _: &[u8], - ) -> Pin>> + Send>> { + ) -> Pin< + Box>> + 
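The block-announce validators above all return `Pin<Box<dyn Future .. + Send>>` by building an `async` block and calling `FutureExt::boxed` on it. A minimal sketch of that shape, assuming the `futures` crate (`Validation` and `validate` are stand-ins for the Substrate types):

use futures::FutureExt;
use std::{future::Future, pin::Pin};

enum Validation {
    Success { is_new_best: bool },
    Failure { disconnect: bool },
}

fn validate(data: &[u8]) -> Pin<Box<dyn Future<Output = Result<Validation, ()>> + Send>> {
    // Copy what the future needs out of the borrowed input first, so the
    // returned future can be `'static`.
    let correct = data.get(0) == Some(&137);
    async move {
        if correct {
            Ok(Validation::Success { is_new_best: true })
        } else {
            Ok(Validation::Failure { disconnect: false })
        }
    }
    .boxed()
}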
Send>, + > { let number = *header.number(); async move { if number < 100 { - Err(Box::::from(String::from("error")) as Box<_>) + Err(Box::::from(String::from("error")) + as Box<_>) } else { Ok(Validation::Success { is_new_best: false }) } - }.boxed() + } + .boxed() } } @@ -1010,22 +1023,18 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { } // Finalize the block and make the justification available. - net.peer(0).client().finalize_block( - BlockId::Number(10), - Some((*b"FRNK", Vec::new())), - true, - ).unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some((*b"FRNK", Vec::new())), true) + .unwrap(); block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); - if net - .peer(1) - .client() - .justifications(&BlockId::Number(10)) - .unwrap() != Some(Justifications::from((*b"FRNK", Vec::new()))) + if net.peer(1).client().justifications(&BlockId::Number(10)).unwrap() != + Some(Justifications::from((*b"FRNK", Vec::new()))) { - return Poll::Pending; + return Poll::Pending } Poll::Ready(()) @@ -1091,7 +1100,7 @@ fn syncs_after_missing_announcement() { #[test] fn syncs_state() { sp_tracing::try_init_simple(); - for skip_proofs in &[ false, true ] { + for skip_proofs in &[false, true] { let mut net = TestNet::new(0); net.add_full_peer_with_config(Default::default()); net.add_full_peer_with_config(FullPeerConfig { @@ -1104,7 +1113,10 @@ fn syncs_state() { assert!(!net.peer(1).client().has_state_at(&BlockId::Number(64))); let just = (*b"FRNK", Vec::new()); - net.peer(1).client().finalize_block(BlockId::Number(60), Some(just), true).unwrap(); + net.peer(1) + .client() + .finalize_block(BlockId::Number(60), Some(just), true) + .unwrap(); // Wait for state sync. block_on(futures::future::poll_fn::<(), _>(|cx| { net.poll(cx); @@ -1133,10 +1145,7 @@ fn syncs_indexed_blocks() { sp_tracing::try_init_simple(); let mut net = TestNet::new(0); let mut n: u64 = 0; - net.add_full_peer_with_config(FullPeerConfig { - storage_chain: true, - ..Default::default() - }); + net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, ..Default::default() }); net.add_full_peer_with_config(FullPeerConfig { storage_chain: true, sync_mode: SyncMode::Fast { skip_proofs: false, storage_chain_mode: true }, @@ -1145,7 +1154,8 @@ fn syncs_indexed_blocks() { net.peer(0).generate_blocks_at( BlockId::number(0), 64, - BlockOrigin::Own, |mut builder| { + BlockOrigin::Own, + |mut builder| { let ex = Extrinsic::Store(n.to_le_bytes().to_vec()); n += 1; builder.push(ex).unwrap(); @@ -1156,10 +1166,30 @@ fn syncs_indexed_blocks() { true, ); let indexed_key = sp_runtime::traits::BlakeTwo256::hash(&42u64.to_le_bytes()); - assert!(net.peer(0).client().as_full().unwrap().indexed_transaction(&indexed_key).unwrap().is_some()); - assert!(net.peer(1).client().as_full().unwrap().indexed_transaction(&indexed_key).unwrap().is_none()); + assert!(net + .peer(0) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_none()); net.block_until_sync(); - assert!(net.peer(1).client().as_full().unwrap().indexed_transaction(&indexed_key).unwrap().is_some()); + assert!(net + .peer(1) + .client() + .as_full() + .unwrap() + .indexed_transaction(&indexed_key) + .unwrap() + .is_some()); } - diff --git a/substrate/client/offchain/src/api.rs b/substrate/client/offchain/src/api.rs index 
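The failing validator above manufactures its error by boxing a plain `String` (the generic parameters of the `Box::::from(..)` call were lost in extraction). A simplified version of the same branch, relying only on the standard library's `From<String>` impl for boxed errors:

// `From<String>` is implemented for `Box<dyn std::error::Error + Send + Sync>`,
// so a plain string becomes the boxed error returned for low block numbers.
fn check(number: u64) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    if number < 100 {
        Err(String::from("error").into())
    } else {
        Ok(())
    }
}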
9b5ff69b726a8a03dcb1ad8aca1d1dce9b26cb02..46ba1a0f3cbc646520e02982d0f6124c7e32d8de 100644 --- a/substrate/client/offchain/src/api.rs +++ b/substrate/client/offchain/src/api.rs @@ -16,25 +16,21 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{ - str::FromStr, - sync::Arc, - convert::TryFrom, - thread::sleep, - collections::HashSet, -}; +use std::{collections::HashSet, convert::TryFrom, str::FromStr, sync::Arc, thread::sleep}; use crate::NetworkProvider; +use codec::{Decode, Encode}; use futures::Future; -use sc_network::{PeerId, Multiaddr}; -use codec::{Encode, Decode}; -use sp_core::OpaquePeerId; -use sp_core::offchain::{ - self, HttpRequestId, Timestamp, HttpRequestStatus, HttpError, - OffchainStorage, OpaqueNetworkState, OpaqueMultiaddr, StorageKind, +pub use http::SharedClient; +use sc_network::{Multiaddr, PeerId}; +use sp_core::{ + offchain::{ + self, HttpError, HttpRequestId, HttpRequestStatus, OffchainStorage, OpaqueMultiaddr, + OpaqueNetworkState, StorageKind, Timestamp, + }, + OpaquePeerId, }; pub use sp_offchain::STORAGE_PREFIX; -pub use http::SharedClient; #[cfg(not(target_os = "unknown"))] mod http; @@ -71,16 +67,15 @@ impl Db { } /// Create new instance of Offchain DB, backed by given backend. - pub fn factory_from_backend(backend: &Backend) -> Option< - Box - > where + pub fn factory_from_backend( + backend: &Backend, + ) -> Option> + where Backend: sc_client_api::Backend, Block: sp_runtime::traits::Block, Storage: 'static, { - sc_client_api::Backend::offchain_storage(backend).map(|db| - Box::new(Self::new(db)) as _ - ) + sc_client_api::Backend::offchain_storage(backend).map(|db| Box::new(Self::new(db)) as _) } } @@ -123,9 +118,8 @@ impl offchain::DbExternalities for Db { old_value.as_ref().map(hex::encode), ); match kind { - StorageKind::PERSISTENT => { - self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value) - }, + StorageKind::PERSISTENT => + self.persistent.compare_and_set(STORAGE_PREFIX, key, old_value, new_value), StorageKind::LOCAL => unavailable_yet(LOCAL_DB), } } @@ -167,10 +161,7 @@ impl offchain::Externalities for Api { fn network_state(&self) -> Result { let external_addresses = self.network_provider.external_addresses(); - let state = NetworkState::new( - self.network_provider.local_peer_id(), - external_addresses, - ); + let state = NetworkState::new(self.network_provider.local_peer_id(), external_addresses); Ok(OpaqueNetworkState::from(state)) } @@ -190,7 +181,7 @@ impl offchain::Externalities for Api { &mut self, method: &str, uri: &str, - _meta: &[u8] + _meta: &[u8], ) -> Result { self.http.request_start(method, uri) } @@ -199,7 +190,7 @@ impl offchain::Externalities for Api { &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()> { self.http.request_add_header(request_id, name, value) } @@ -208,7 +199,7 @@ impl offchain::Externalities for Api { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { self.http.request_write_body(request_id, chunk, deadline) } @@ -216,15 +207,12 @@ impl offchain::Externalities for Api { fn http_response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec { self.http.response_wait(ids, deadline) } - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { 
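`local_storage_compare_and_set` above forwards to the backing store's `compare_and_set`. A minimal in-memory model of the assumed contract (write only if the caller's snapshot is still the current value, report whether the write happened; the real backend does this atomically):

use std::collections::HashMap;

fn compare_and_set(
    db: &mut HashMap<Vec<u8>, Vec<u8>>,
    key: &[u8],
    old_value: Option<&[u8]>,
    new_value: &[u8],
) -> bool {
    // Replace the value only if the caller's snapshot is still current.
    let current = db.get(key).map(|v| v.as_slice());
    if current == old_value {
        db.insert(key.to_vec(), new_value.to_vec());
        true
    } else {
        false
    }
}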
self.http.response_headers(request_id) } @@ -232,15 +220,14 @@ impl offchain::Externalities for Api { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { self.http.response_read_body(request_id, buffer, deadline) } fn set_authorized_nodes(&mut self, nodes: Vec, authorized_only: bool) { - let peer_ids: HashSet = nodes.into_iter() - .filter_map(|node| PeerId::from_bytes(&node.0).ok()) - .collect(); + let peer_ids: HashSet = + nodes.into_iter().filter_map(|node| PeerId::from_bytes(&node.0).ok()).collect(); self.network_provider.set_authorized_peers(peer_ids); self.network_provider.set_authorized_only(authorized_only); @@ -256,10 +243,7 @@ pub struct NetworkState { impl NetworkState { fn new(peer_id: PeerId, external_addresses: Vec) -> Self { - NetworkState { - peer_id, - external_addresses, - } + NetworkState { peer_id, external_addresses } } } @@ -277,10 +261,7 @@ impl From for OpaqueNetworkState { }) .collect(); - OpaqueNetworkState { - peer_id, - external_addresses, - } + OpaqueNetworkState { peer_id, external_addresses } } } @@ -293,7 +274,8 @@ impl TryFrom for NetworkState { let bytes: Vec = Decode::decode(&mut &inner_vec[..]).map_err(|_| ())?; let peer_id = PeerId::from_bytes(&bytes).map_err(|_| ())?; - let external_addresses: Result, Self::Error> = state.external_addresses + let external_addresses: Result, Self::Error> = state + .external_addresses .iter() .map(|enc_multiaddr| -> Result { let inner_vec = &enc_multiaddr.0; @@ -305,10 +287,7 @@ impl TryFrom for NetworkState { .collect(); let external_addresses = external_addresses?; - Ok(NetworkState { - peer_id, - external_addresses, - }) + Ok(NetworkState { peer_id, external_addresses }) } } @@ -329,15 +308,9 @@ impl AsyncApi { ) -> (Api, Self) { let (http_api, http_worker) = http::http(shared_client); - let api = Api { - network_provider, - is_validator, - http: http_api, - }; + let api = Api { network_provider, is_validator, http: http_api }; - let async_api = Self { - http: Some(http_worker), - }; + let async_api = Self { http: Some(http_worker) }; (api, async_api) } @@ -355,8 +328,11 @@ mod tests { use super::*; use sc_client_db::offchain::LocalStorage; use sc_network::{NetworkStateInfo, PeerId}; - use sp_core::offchain::{Externalities, DbExternalities}; - use std::{convert::{TryFrom, TryInto}, time::SystemTime}; + use sp_core::offchain::{DbExternalities, Externalities}; + use std::{ + convert::{TryFrom, TryInto}, + time::SystemTime, + }; struct TestNetwork(); @@ -385,11 +361,7 @@ mod tests { let mock = Arc::new(TestNetwork()); let shared_client = SharedClient::new(); - AsyncApi::new( - mock, - false, - shared_client, - ) + AsyncApi::new(mock, false, shared_client) } fn offchain_db() -> Db { @@ -402,7 +374,12 @@ mod tests { // Get timestamp from std. let now = SystemTime::now(); - let d: u64 = now.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis().try_into().unwrap(); + let d: u64 = now + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis() + .try_into() + .unwrap(); // Get timestamp from offchain api. let timestamp = api.timestamp(); diff --git a/substrate/client/offchain/src/api/http.rs b/substrate/client/offchain/src/api/http.rs index f03f7a93b856c6c13a8c6b51c4241195c8d54652..75a27f0c7cfbee564ad72b85c26388284bb62cf7 100644 --- a/substrate/client/offchain/src/api/http.rs +++ b/substrate/client/offchain/src/api/http.rs @@ -28,16 +28,22 @@ //! actively calling any function. 
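`set_authorized_nodes` above decodes opaque peer ids and silently drops anything undecodable via `filter_map`. The same shape with standard-library stand-ins (`String::from_utf8` playing the role of `PeerId::from_bytes`):

use std::collections::HashSet;

fn to_authorized_set(nodes: Vec<Vec<u8>>) -> HashSet<String> {
    nodes
        .into_iter()
        // Keep only the entries that decode; `.ok()` turns failures into
        // `None`, which `filter_map` discards.
        .filter_map(|node| String::from_utf8(node).ok())
        .collect()
}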
use crate::api::timestamp; -use bytes::buf::ext::{Reader, BufExt}; +use bytes::buf::ext::{BufExt, Reader}; use fnv::FnvHashMap; -use futures::{prelude::*, future, channel::mpsc}; -use log::error; -use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; -use std::{convert::TryFrom, fmt, io::Read as _, pin::Pin, task::{Context, Poll}}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; -use std::sync::Arc; -use hyper::{Client as HyperClient, Body, client}; +use futures::{channel::mpsc, future, prelude::*}; +use hyper::{client, Body, Client as HyperClient}; use hyper_rustls::HttpsConnector; +use log::error; +use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + convert::TryFrom, + fmt, + io::Read as _, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; /// Wrapper struct used for keeping the hyper_rustls client running. #[derive(Clone)] @@ -63,12 +69,8 @@ pub fn http(shared_client: SharedClient) -> (HttpApi, HttpWorker) { requests: FnvHashMap::default(), }; - let engine = HttpWorker { - to_api, - from_api, - http_client: shared_client.0, - requests: Vec::new(), - }; + let engine = + HttpWorker { to_api, from_api, http_client: shared_client.0, requests: Vec::new() }; (api, engine) } @@ -127,11 +129,7 @@ struct HttpApiRequestRp { impl HttpApi { /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - method: &str, - uri: &str - ) -> Result { + pub fn request_start(&mut self, method: &str, uri: &str) -> Result { // Start by building the prototype of the request. // We do this first so that we don't touch anything in `self` if building the prototype // fails. @@ -146,10 +144,11 @@ impl HttpApi { Some(new_id) => self.next_id.0 = new_id, None => { error!("Overflow in offchain worker HTTP request ID assignment"); - return Err(()); - } + return Err(()) + }, }; - self.requests.insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); + self.requests + .insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); Ok(new_id) } @@ -159,11 +158,11 @@ impl HttpApi { &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()> { let request = match self.requests.get_mut(&request_id) { Some(&mut HttpApiRequest::NotDispatched(ref mut rq, _)) => rq, - _ => return Err(()) + _ => return Err(()), }; let name = hyper::header::HeaderName::try_from(name).map_err(drop)?; @@ -179,7 +178,7 @@ impl HttpApi { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { // Extract the request from the list. // Don't forget to add it back if necessary when returning. 
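`request_start` above guards its id counter with `checked_add`, so exhaustion surfaces as an error instead of a wrapped (and therefore reused) request id. A sketch with the id modelled as a bare `u16`, the payload type of `HttpRequestId`:

fn next_request_id(counter: &mut u16) -> Result<u16, ()> {
    let id = *counter;
    match counter.checked_add(1) {
        // Still room: advance the counter and hand out the current id.
        Some(next) => *counter = next,
        // Counter exhausted: refuse rather than wrap around.
        None => return Err(()),
    }
    Ok(id)
}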
@@ -193,76 +192,83 @@ impl HttpApi { let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); match when_ready { - future::MaybeDone::Done(Ok(())) => {} + future::MaybeDone::Done(Ok(())) => {}, future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), - future::MaybeDone::Future(_) | - future::MaybeDone::Gone => { + future::MaybeDone::Future(_) | future::MaybeDone::Gone => { debug_assert!(matches!(deadline, future::MaybeDone::Done(..))); return Err(HttpError::DeadlineReached) - } + }, }; - futures::executor::block_on(sender.send_data(hyper::body::Bytes::from(chunk.to_owned()))) - .map_err(|_| { - error!("HTTP sender refused data despite being ready"); - HttpError::IoError - }) + futures::executor::block_on( + sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), + ) + .map_err(|_| { + error!("HTTP sender refused data despite being ready"); + HttpError::IoError + }) }; loop { request = match request { HttpApiRequest::NotDispatched(request, sender) => { // If the request is not dispatched yet, dispatch it and loop again. - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: request_id, - request - }); + let _ = self + .to_worker + .unbounded_send(ApiToWorker::Dispatch { id: request_id, request }); HttpApiRequest::Dispatched(Some(sender)) - } + }, HttpApiRequest::Dispatched(Some(mut sender)) => if !chunk.is_empty() { match poll_sender(&mut sender) { Err(HttpError::IoError) => return Err(HttpError::IoError), other => { - self.requests.insert( - request_id, - HttpApiRequest::Dispatched(Some(sender)) - ); + self.requests + .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); return other - } + }, } } else { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); return Ok(()) - } + }, - HttpApiRequest::Response(mut response @ HttpApiRequestRp { sending_body: Some(_), .. }) => + HttpApiRequest::Response( + mut response @ HttpApiRequestRp { sending_body: Some(_), .. }, + ) => if !chunk.is_empty() { - match poll_sender(response.sending_body.as_mut() - .expect("Can only enter this match branch if Some; qed")) { + match poll_sender( + response + .sending_body + .as_mut() + .expect("Can only enter this match branch if Some; qed"), + ) { Err(HttpError::IoError) => return Err(HttpError::IoError), other => { - self.requests.insert(request_id, HttpApiRequest::Response(response)); + self.requests + .insert(request_id, HttpApiRequest::Response(response)); return other - } + }, } - } else { // Writing an empty body is a hint that we should stop writing. Dropping // the sender. - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body: None, - ..response - })); + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body: None, + ..response + }), + ); return Ok(()) - } + }, HttpApiRequest::Fail(_) => - // If the request has already failed, return without putting back the request - // in the list. + // If the request has already failed, return without putting back the request + // in the list. return Err(HttpError::IoError), v @ HttpApiRequest::Dispatched(None) | @@ -270,7 +276,7 @@ impl HttpApi { // We have already finished sending this body. 
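The loop above is an ownership-based state machine: each request is removed from the map, matched by value, and either consumed or inserted back in its new state. The deadline handling inside it races readiness against a timeout with `future::maybe_done` plus `future::select`, then inspects which side finished. A sketch of that racing idiom, assuming the `futures` crate (the two futures are trivial stand-ins):

use futures::future::{self, MaybeDone};

fn wait_ready_or_deadline() -> Result<(), &'static str> {
    // Stand-ins: readiness completes immediately, the deadline never fires.
    let mut when_ready = future::maybe_done(future::ready(Ok::<(), ()>(())));
    let mut deadline = future::maybe_done(future::pending::<()>());
    futures::executor::block_on(future::select(&mut when_ready, &mut deadline));
    match when_ready {
        MaybeDone::Done(Ok(())) => Ok(()),
        MaybeDone::Done(Err(_)) => Err("io error"),
        // Readiness did not complete, so the deadline must have.
        MaybeDone::Future(_) | MaybeDone::Gone => Err("deadline reached"),
    }
}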
self.requests.insert(request_id, v); return Err(HttpError::Invalid) - } + }, } } } @@ -279,30 +285,27 @@ impl HttpApi { pub fn response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec { // First of all, dispatch all the non-dispatched requests and drop all senders so that the // user can't write anymore data. for id in ids { match self.requests.get_mut(id) { - Some(HttpApiRequest::NotDispatched(_, _)) => {} + Some(HttpApiRequest::NotDispatched(_, _)) => {}, Some(HttpApiRequest::Dispatched(sending_body)) | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { let _ = sending_body.take(); continue - } - _ => continue + }, + _ => continue, }; let (request, _sender) = match self.requests.remove(id) { Some(HttpApiRequest::NotDispatched(rq, s)) => (rq, s), - _ => unreachable!("we checked for NotDispatched above; qed") + _ => unreachable!("we checked for NotDispatched above; qed"), }; - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: *id, - request - }); + let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { id: *id, request }); // We also destroy the sender in order to forbid writing more data. self.requests.insert(*id, HttpApiRequest::Dispatched(None)); @@ -319,25 +322,24 @@ impl HttpApi { for id in ids { output.push(match self.requests.get(id) { None => HttpRequestStatus::Invalid, - Some(HttpApiRequest::NotDispatched(_, _)) => - unreachable!("we replaced all the NotDispatched with Dispatched earlier; qed"), + Some(HttpApiRequest::NotDispatched(_, _)) => unreachable!( + "we replaced all the NotDispatched with Dispatched earlier; qed" + ), Some(HttpApiRequest::Dispatched(_)) => { must_wait_more = true; HttpRequestStatus::DeadlineReached }, Some(HttpApiRequest::Fail(_)) => HttpRequestStatus::IoError, - Some(HttpApiRequest::Response(HttpApiRequestRp { status_code, .. })) => - HttpRequestStatus::Finished(status_code.as_u16()), + Some(HttpApiRequest::Response(HttpApiRequestRp { + status_code, .. + })) => HttpRequestStatus::Finished(status_code.as_u16()), }); } debug_assert_eq!(output.len(), ids.len()); // Are we ready to call `return`? - let is_done = if let future::MaybeDone::Done(_) = deadline { - true - } else { - !must_wait_more - }; + let is_done = + if let future::MaybeDone::Done(_) = deadline { true } else { !must_wait_more }; if is_done { // Requests in "fail" mode are purged before returning. 
@@ -369,47 +371,45 @@ impl HttpApi { Some(WorkerToApi::Response { id, status_code, headers, body }) => match self.requests.remove(&id) { Some(HttpApiRequest::Dispatched(sending_body)) => { - self.requests.insert(id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body, - status_code, - headers, - body: body.fuse(), - current_read_chunk: None, - })); - } - None => {} // can happen if we detected an IO error when sending the body + self.requests.insert( + id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body, + status_code, + headers, + body: body.fuse(), + current_read_chunk: None, + }), + ); + }, + None => {}, // can happen if we detected an IO error when sending the body _ => error!("State mismatch between the API and worker"), - } + }, - Some(WorkerToApi::Fail { id, error }) => - match self.requests.remove(&id) { - Some(HttpApiRequest::Dispatched(_)) => { - self.requests.insert(id, HttpApiRequest::Fail(error)); - } - None => {} // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), - } + Some(WorkerToApi::Fail { id, error }) => match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(_)) => { + self.requests.insert(id, HttpApiRequest::Fail(error)); + }, + None => {}, // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + }, None => { error!("Worker has crashed"); return ids.iter().map(|_| HttpRequestStatus::IoError).collect() - } + }, } - } } /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { + pub fn response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { // Do an implicit non-blocking wait on the request. let _ = self.response_wait(&[request_id], Some(timestamp::now())); let headers = match self.requests.get(&request_id) { Some(HttpApiRequest::Response(HttpApiRequestRp { headers, .. })) => headers, - _ => return Vec::new() + _ => return Vec::new(), }; headers @@ -423,7 +423,7 @@ impl HttpApi { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { // Do an implicit wait on the request. let _ = self.response_wait(&[request_id], deadline); @@ -439,14 +439,13 @@ impl HttpApi { return Err(HttpError::DeadlineReached) }, // The request has failed. - Some(HttpApiRequest::Fail { .. }) => - return Err(HttpError::IoError), + Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), // Request hasn't been dispatched yet; reading the body is invalid. Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { self.requests.insert(request_id, rq); return Err(HttpError::Invalid) - } - None => return Err(HttpError::Invalid) + }, + None => return Err(HttpError::Invalid), }; // Convert the deadline into a `Future` that resolves when the deadline is reached. @@ -456,19 +455,22 @@ impl HttpApi { // First read from `current_read_chunk`. if let Some(mut current_read_chunk) = response.current_read_chunk.take() { match current_read_chunk.read(buffer) { - Ok(0) => {} + Ok(0) => {}, Ok(n) => { - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - current_read_chunk: Some(current_read_chunk), - .. 
response - })); + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + current_read_chunk: Some(current_read_chunk), + ..response + }), + ); return Ok(n) }, Err(err) => { // This code should never be reached unless there's a logic error somewhere. error!("Failed to read from current read chunk: {:?}", err); return Err(HttpError::IoError) - } + }, } } @@ -482,7 +484,7 @@ impl HttpApi { match next_body { Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), Some(Err(_)) => return Err(HttpError::IoError), - None => return Ok(0), // eof + None => return Ok(0), // eof } } @@ -496,9 +498,7 @@ impl HttpApi { impl fmt::Debug for HttpApi { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() + f.debug_list().entries(self.requests.iter()).finish() } } @@ -507,12 +507,13 @@ impl fmt::Debug for HttpApiRequest { match self { HttpApiRequest::NotDispatched(_, _) => f.debug_tuple("HttpApiRequest::NotDispatched").finish(), - HttpApiRequest::Dispatched(_) => - f.debug_tuple("HttpApiRequest::Dispatched").finish(), - HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => - f.debug_tuple("HttpApiRequest::Response").field(status_code).field(headers).finish(), - HttpApiRequest::Fail(err) => - f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), + HttpApiRequest::Dispatched(_) => f.debug_tuple("HttpApiRequest::Dispatched").finish(), + HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => f + .debug_tuple("HttpApiRequest::Response") + .field(status_code) + .field(headers) + .finish(), + HttpApiRequest::Fail(err) => f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), } } } @@ -525,7 +526,7 @@ enum ApiToWorker { id: HttpRequestId, /// Request to start executing. request: hyper::Request, - } + }, } /// Message send from the API to the worker. @@ -605,8 +606,8 @@ impl Future for HttpWorker { Poll::Ready(Ok(response)) => response, Poll::Ready(Err(error)) => { let _ = me.to_api.unbounded_send(WorkerToApi::Fail { id, error }); - continue; // don't insert the request back - } + continue // don't insert the request back + }, }; // We received a response! Decompose it into its parts. @@ -622,20 +623,20 @@ impl Future for HttpWorker { }); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); - cx.waker().wake_by_ref(); // reschedule in order to poll the new future + cx.waker().wake_by_ref(); // reschedule in order to poll the new future continue - } + }, HttpWorkerRequest::ReadBody { mut body, mut tx } => { // Before reading from the HTTP response, check that `tx` is ready to accept // a new chunk. match tx.poll_ready(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(_)) => continue, // don't insert the request back + Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => continue, // don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); continue - } + }, } // `tx` is ready. Read a chunk from the socket and send it to the channel. 
@@ -643,31 +644,31 @@ impl Future for HttpWorker { Poll::Ready(Some(Ok(chunk))) => { let _ = tx.start_send(Ok(chunk)); me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - cx.waker().wake_by_ref(); // reschedule in order to continue reading - } + cx.waker().wake_by_ref(); // reschedule in order to continue reading + }, Poll::Ready(Some(Err(err))) => { let _ = tx.start_send(Err(err)); // don't insert the request back }, - Poll::Ready(None) => {} // EOF; don't insert the request back + Poll::Ready(None) => {}, // EOF; don't insert the request back Poll::Pending => { me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); }, } - } + }, } } // Check for messages coming from the [`HttpApi`]. match Stream::poll_next(Pin::new(&mut me.from_api), cx) { Poll::Pending => {}, - Poll::Ready(None) => return Poll::Ready(()), // stops the worker + Poll::Ready(None) => return Poll::Ready(()), // stops the worker Poll::Ready(Some(ApiToWorker::Dispatch { id, request })) => { let future = me.http_client.request(request); debug_assert!(me.requests.iter().all(|(i, _)| *i != id)); me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - cx.waker().wake_by_ref(); // reschedule the task to poll the request - } + cx.waker().wake_by_ref(); // reschedule the task to poll the request + }, } Poll::Pending @@ -676,9 +677,7 @@ impl Future for HttpWorker { impl fmt::Debug for HttpWorker { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() + f.debug_list().entries(self.requests.iter()).finish() } } @@ -695,13 +694,13 @@ impl fmt::Debug for HttpWorkerRequest { #[cfg(test)] mod tests { - use core::convert::Infallible; - use crate::api::timestamp; use super::{http, SharedClient}; - use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Duration}; + use crate::api::timestamp; + use core::convert::Infallible; use futures::future; use lazy_static::lazy_static; - + use sp_core::offchain::{Duration, HttpError, HttpRequestId, HttpRequestStatus}; + // Using lazy_static to avoid spawning lots of different SharedClients, // as spawning a SharedClient is CPU-intensive and opens lots of fds. lazy_static! 
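The test module above keeps one `SharedClient` in a `lazy_static!` precisely because constructing it is CPU-intensive and opens many fds: the static is created on first access and shared by every test thereafter. A sketch of the pattern (`SharedClient::new` is a stand-in constructor):

use lazy_static::lazy_static;

#[derive(Clone)]
struct SharedClient;

impl SharedClient {
    fn new() -> Self {
        // Imagine expensive setup here (thread pools, fds, TLS roots, ...).
        SharedClient
    }
}

lazy_static! {
    // Initialised once, on first use, then reused by every caller.
    static ref SHARED_CLIENT: SharedClient = SharedClient::new();
}

fn client() -> SharedClient {
    SHARED_CLIENT.clone()
}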
{ @@ -720,14 +719,17 @@ mod tests { let mut rt = tokio::runtime::Runtime::new().unwrap(); let worker = rt.spawn(worker); let server = rt.spawn(async move { - let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()) - .serve(hyper::service::make_service_fn(|_| { async move { - Ok::<_, Infallible>(hyper::service::service_fn(move |_req| async move { - Ok::<_, Infallible>( - hyper::Response::new(hyper::Body::from("Hello World!")) - ) - })) - }})); + let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( + hyper::service::make_service_fn(|_| async move { + Ok::<_, Infallible>(hyper::service::service_fn( + move |_req| async move { + Ok::<_, Infallible>(hyper::Response::new(hyper::Body::from( + "Hello World!", + ))) + }, + )) + }), + ); let _ = addr_tx.send(server.local_addr()); server.await.map_err(drop) }); @@ -750,7 +752,7 @@ mod tests { match api.response_wait(&[id], Some(deadline))[0] { HttpRequestStatus::Finished(200) => {}, - v => panic!("Connecting to localhost failed: {:?}", v) + v => panic!("Connecting to localhost failed: {:?}", v), } let headers = api.response_headers(id); @@ -766,13 +768,13 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_start("\0", &format!("http://{}", addr)) { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; match api.request_start("GET", "http://\0localhost") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; } @@ -781,42 +783,42 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_add_header(HttpRequestId(0xdead), "Foo", "bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); match api.request_add_header(id, "\0", "bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); match api.request_add_header(id, "Foo", "\0") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_add_header(id, "Foo", "Bar").unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() + Err(()) => {}, + Ok(_) => panic!(), }; } @@ -825,13 +827,13 @@ mod tests { let (mut api, addr) = build_api_server!(); match api.request_write_body(HttpRequestId(0xdead), &[1, 2, 3], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; match api.request_write_body(HttpRequestId(0xdead), &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -839,8 +841,8 @@ mod tests { api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.request_write_body(id, &[], None).unwrap(); match api.request_write_body(id, &[], None) { 
- Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); @@ -848,52 +850,52 @@ mod tests { api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.request_write_body(id, &[], None).unwrap(); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.response_wait(&[id], None); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); api.response_wait(&[id], None); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); api.response_headers(id); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); api.response_read_body(id, &mut [], None).unwrap(); match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), }; } @@ -948,15 +950,15 @@ mod tests { let mut buf = [0; 512]; match api.response_read_body(HttpRequestId(0xdead), &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), } let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} match api.response_read_body(id, &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() + Err(HttpError::Invalid) => {}, + _ => panic!(), } } @@ -973,16 +975,26 @@ mod tests { for _ in 0..250 { match rand::random::() % 6 { - 0 => { let _ = api.request_add_header(id, "Foo", "Bar"); } - 1 => { let _ = api.request_write_body(id, &[1, 2, 3, 4], None); } - 2 => { let _ = api.request_write_body(id, &[], None); } - 3 => { let _ = api.response_wait(&[id], None); } - 4 => { let _ = api.response_headers(id); } + 0 => { + let _ = api.request_add_header(id, "Foo", "Bar"); + }, + 1 => { + let _ = api.request_write_body(id, &[1, 2, 3, 4], None); + }, + 2 => { + let _ = api.request_write_body(id, &[], None); + }, + 3 => { + let _ = api.response_wait(&[id], None); + }, + 4 => { + let _ = api.response_headers(id); + }, 5 => { let mut buf = [0; 512]; let _ = api.response_read_body(id, &mut buf, None); - } - 6 ..= 255 => unreachable!() + }, + 6..=255 => unreachable!(), } } } diff --git 
a/substrate/client/offchain/src/api/http_dummy.rs b/substrate/client/offchain/src/api/http_dummy.rs index ff9c2fb2aa0295acc14e909b6394ac63ddcd725b..386fc445d4e9911836f35be43c85673436908989 100644 --- a/substrate/client/offchain/src/api/http_dummy.rs +++ b/substrate/client/offchain/src/api/http_dummy.rs @@ -18,8 +18,12 @@ //! Contains the same API as the `http` module, except that everything returns an error. -use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; -use std::{future::Future, pin::Pin, task::Context, task::Poll}; +use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; /// Wrapper struct (wrapping nothing in case of http_dummy) used for keeping the hyper_rustls client running. #[derive(Clone)] @@ -46,24 +50,17 @@ pub struct HttpWorker; impl HttpApi { /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - _: &str, - _: &str - ) -> Result { + pub fn request_start(&mut self, _: &str, _: &str) -> Result { /// Because this always returns an error, none of the other methods should ever be called. Err(()) } /// Mimics the corresponding method in the offchain API. - pub fn request_add_header( - &mut self, - _: HttpRequestId, - _: &str, - _: &str - ) -> Result<(), ()> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") + pub fn request_add_header(&mut self, _: HttpRequestId, _: &str, _: &str) -> Result<(), ()> { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) } /// Mimics the corresponding method in the offchain API. @@ -71,33 +68,36 @@ impl HttpApi { &mut self, _: HttpRequestId, _: &[u8], - _: Option + _: Option, ) -> Result<(), HttpError> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) } /// Mimics the corresponding method in the offchain API. pub fn response_wait( &mut self, requests: &[HttpRequestId], - _: Option + _: Option, ) -> Vec { if requests.is_empty() { Vec::new() } else { - unreachable!("Creating a request always fails, thus the list of requests should \ - always be empty; qed") + unreachable!( + "Creating a request always fails, thus the list of requests should \ + always be empty; qed" + ) } } /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - _: HttpRequestId - ) -> Vec<(Vec, Vec)> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") + pub fn response_headers(&mut self, _: HttpRequestId) -> Vec<(Vec, Vec)> { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) } /// Mimics the corresponding method in the offchain API. 
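The dummy API above pairs a single fallible entry point (`request_start` always returns `Err`) with `unreachable!` bodies whose messages spell out the invariant and end in the codebase's customary `; qed`. The shape, reduced to two standalone functions:

fn request_start() -> Result<u16, ()> {
    // No HTTP client exists on this target, so every request fails to start.
    Err(())
}

fn request_add_header() {
    unreachable!(
        "Creating a request always fails, thus this function will \
         never be called; qed"
    )
}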
@@ -105,10 +105,12 @@ impl HttpApi { &mut self, _: HttpRequestId, _: &mut [u8], - _: Option + _: Option, ) -> Result { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) } } diff --git a/substrate/client/offchain/src/api/timestamp.rs b/substrate/client/offchain/src/api/timestamp.rs index 6ea0f000f8d1996fe9d073ee76ee3d94240c7076..f1c8c004a019882ff5f49618f2f8ce3db2320ac7 100644 --- a/substrate/client/offchain/src/api/timestamp.rs +++ b/substrate/client/offchain/src/api/timestamp.rs @@ -19,8 +19,10 @@ //! Helper methods dedicated to timestamps. use sp_core::offchain::Timestamp; -use std::convert::TryInto; -use std::time::{SystemTime, Duration}; +use std::{ + convert::TryInto, + time::{Duration, SystemTime}, +}; /// Returns the current time as a `Timestamp`. pub fn now() -> Timestamp { @@ -34,9 +36,12 @@ pub fn now() -> Timestamp { Ok(d) => { let duration = d.as_millis(); // Assuming overflow won't happen for a few hundred years. - Timestamp::from_unix_millis(duration.try_into() - .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")) - } + Timestamp::from_unix_millis( + duration + .try_into() + .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed"), + ) + }, } } @@ -60,7 +65,6 @@ pub fn deadline_to_future( // Only apply delay if we need to wait a non-zero duration Some(duration) if duration <= Duration::from_secs(0) => Either::Right(Either::Left(future::ready(()))), - Some(duration) => - Either::Right(Either::Right(futures_timer::Delay::new(duration))), + Some(duration) => Either::Right(Either::Right(futures_timer::Delay::new(duration))), }) } diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 21b1b7b7d21cabfa04d78912078112d87118b995..be6e4238ca5f101d5d772391ff84d7dfce99c0ba 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -35,20 +35,22 @@ #![warn(missing_docs)] -use std::{ - fmt, marker::PhantomData, sync::Arc, - collections::HashSet, -}; +use std::{collections::HashSet, fmt, marker::PhantomData, sync::Arc}; -use parking_lot::Mutex; -use threadpool::ThreadPool; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use futures::future::Future; +use futures::{ + future::{ready, Future}, + prelude::*, +}; use log::{debug, warn}; +use parking_lot::Mutex; use sc_network::{ExHashT, NetworkService, NetworkStateInfo, PeerId}; -use sp_core::{offchain, ExecutionContext, traits::SpawnNamed}; -use sp_runtime::{generic::BlockId, traits::{self, Header}}; -use futures::{prelude::*, future::ready}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::{offchain, traits::SpawnNamed, ExecutionContext}; +use sp_runtime::{ + generic::BlockId, + traits::{self, Header}, +}; +use threadpool::ThreadPool; mod api; @@ -94,25 +96,23 @@ impl OffchainWorkers { Self { client, _block: PhantomData, - thread_pool: Mutex::new(ThreadPool::with_name("offchain-worker".into(), num_cpus::get())), + thread_pool: Mutex::new(ThreadPool::with_name( + "offchain-worker".into(), + num_cpus::get(), + )), shared_client, } } } -impl fmt::Debug for OffchainWorkers< - Client, - Block, -> { +impl fmt::Debug for OffchainWorkers { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("OffchainWorkers").finish() } } -impl OffchainWorkers< - Client, - Block, -> where +impl OffchainWorkers +where Block: traits::Block, Client: ProvideRuntimeApi + Send + Sync 
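`timestamp::now()` above converts the duration since the unix epoch into a `u64` of milliseconds; `as_millis()` returns `u128`, hence the `try_into()` and the "won't overflow for hundreds of years" expectation. The standard-library core of it (sketch only; the real code also handles a clock set before the epoch):

use std::convert::TryInto;
use std::time::{SystemTime, UNIX_EPOCH};

fn now_unix_millis() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set after the unix epoch")
        .as_millis()
        .try_into()
        .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")
}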
+ 'static, Client::Api: OffchainWorkerApi, @@ -127,28 +127,22 @@ impl OffchainWorkers< ) -> impl Future { let runtime = self.client.runtime_api(); let at = BlockId::hash(header.hash()); - let has_api_v1 = runtime.has_api_with::, _>( - &at, |v| v == 1 - ); - let has_api_v2 = runtime.has_api_with::, _>( - &at, |v| v == 2 - ); + let has_api_v1 = runtime.has_api_with::, _>(&at, |v| v == 1); + let has_api_v2 = runtime.has_api_with::, _>(&at, |v| v == 2); let version = match (has_api_v1, has_api_v2) { (_, Ok(true)) => 2, (Ok(true), _) => 1, err => { - let help = "Consider turning off offchain workers if they are not part of your runtime."; + let help = + "Consider turning off offchain workers if they are not part of your runtime."; log::error!("Unsupported Offchain Worker API version: {:?}. {}.", err, help); 0 - } + }, }; debug!("Checking offchain workers at {:?}: version:{}", at, version); if version > 0 { - let (api, runner) = api::AsyncApi::new( - network_provider, - is_validator, - self.shared_client.clone(), - ); + let (api, runner) = + api::AsyncApi::new(network_provider, is_validator, self.shared_client.clone()); debug!("Spawning offchain workers at {:?}", at); let header = header.clone(); let client = self.client.clone(); @@ -156,18 +150,19 @@ impl OffchainWorkers< let runtime = client.runtime_api(); let api = Box::new(api); debug!("Running offchain workers at {:?}", at); - let context = ExecutionContext::OffchainCall(Some( - (api, offchain::Capabilities::all()) - )); + let context = + ExecutionContext::OffchainCall(Some((api, offchain::Capabilities::all()))); let run = if version == 2 { runtime.offchain_worker_with_context(&at, context, &header) } else { #[allow(deprecated)] runtime.offchain_worker_before_version_2_with_context( - &at, context, *header.number() + &at, + context, + *header.number(), ) }; - if let Err(e) = run { + if let Err(e) = run { log::error!("Error running offchain workers at {:?}: {:?}", at, e); } }); @@ -197,50 +192,51 @@ pub async fn notification_future( offchain: Arc>, spawner: Spawner, network_provider: Arc, -) - where - Block: traits::Block, - Client: ProvideRuntimeApi + sc_client_api::BlockchainEvents + Send + Sync + 'static, - Client::Api: OffchainWorkerApi, - Spawner: SpawnNamed +) where + Block: traits::Block, + Client: + ProvideRuntimeApi + sc_client_api::BlockchainEvents + Send + Sync + 'static, + Client::Api: OffchainWorkerApi, + Spawner: SpawnNamed, { - client.import_notification_stream().for_each(move |n| { - if n.is_new_best { - spawner.spawn( - "offchain-on-block", - offchain.on_block_imported( - &n.header, - network_provider.clone(), - is_validator, - ).boxed(), - ); - } else { - log::debug!( - target: "sc_offchain", - "Skipping offchain workers for non-canon block: {:?}", - n.header, - ) - } + client + .import_notification_stream() + .for_each(move |n| { + if n.is_new_best { + spawner.spawn( + "offchain-on-block", + offchain + .on_block_imported(&n.header, network_provider.clone(), is_validator) + .boxed(), + ); + } else { + log::debug!( + target: "sc_offchain", + "Skipping offchain workers for non-canon block: {:?}", + n.header, + ) + } - ready(()) - }).await; + ready(()) + }) + .await; } #[cfg(test)] mod tests { use super::*; - use std::sync::Arc; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderProvider as _; + use sc_client_api::Backend as _; use sc_network::{Multiaddr, PeerId}; - use substrate_test_runtime_client::{ - TestClient, runtime::Block, TestClientBuilderExt, - DefaultTestClientBuilderExt, ClientBlockImportExt, - 
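`notification_future` above is the canonical stream-consumer shape: `for_each` over the import-notification stream, spawn a task for new-best blocks, log and skip the rest, and return `ready(())` so the combinator keeps draining. A self-contained sketch with a plain iterator stream, assuming the `futures` crate (`notification_loop` is an illustrative name):

use futures::{future::ready, stream, StreamExt};

async fn notification_loop() {
    // Stand-in for `client.import_notification_stream()`: `true` marks a
    // new-best block.
    stream::iter([true, false, true])
        .for_each(|is_new_best| {
            if is_new_best {
                // The real code spawns `offchain.on_block_imported(..)` here.
                println!("spawning offchain workers");
            } else {
                println!("skipping offchain workers for non-canon block");
            }
            ready(())
        })
        .await;
}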
}; use sc_transaction_pool::{BasicPool, FullChainApi}; - use sc_transaction_pool_api::{TransactionPool, InPoolTransaction}; + use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_consensus::BlockOrigin; - use sc_client_api::Backend as _; - use sc_block_builder::BlockBuilderProvider as _; - use futures::executor::block_on; + use std::sync::Arc; + use substrate_test_runtime_client::{ runtime::Block, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClient, TestClientBuilderExt, }; struct TestNetwork(); @@ -264,9 +260,7 @@ mod tests { } } - struct TestPool( - Arc<BasicPool<FullChainApi<TestClient, Block>, Block>> - ); + struct TestPool(Arc<BasicPool<FullChainApi<TestClient, Block>, Block>>); impl sc_transaction_pool_api::OffchainSubmitTransaction<Block> for TestPool { fn submit_at( @@ -299,9 +293,7 @@ mod tests { // when let offchain = OffchainWorkers::new(client); - futures::executor::block_on( - offchain.on_block_imported(&header, network, false) - ); + futures::executor::block_on(offchain.on_block_imported(&header, network, false)); // then assert_eq!(pool.0.status().ready, 1); @@ -314,22 +306,21 @@ sp_tracing::try_init_simple(); - let (client, backend) = - substrate_test_runtime_client::TestClientBuilder::new() - .enable_offchain_indexing_api() - .build_with_backend(); + let (client, backend) = substrate_test_runtime_client::TestClientBuilder::new() + .enable_offchain_indexing_api() + .build_with_backend(); let mut client = Arc::new(client); let offchain_db = backend.offchain_storage().unwrap(); let key = &b"hello"[..]; let value = &b"world"[..]; let mut block_builder = client.new_block(Default::default()).unwrap(); - block_builder.push( - substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexSet( key.to_vec(), value.to_vec(), - ), - ).unwrap(); + block_builder + .push(substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexSet( key.to_vec(), value.to_vec(), + )) + .unwrap(); let block = block_builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -337,9 +328,11 @@ assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); let mut block_builder = client.new_block(Default::default()).unwrap(); - block_builder.push( - substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexClear(key.to_vec()), - ).unwrap(); + block_builder + .push(substrate_test_runtime_client::runtime::Extrinsic::OffchainIndexClear( key.to_vec(), )) + .unwrap(); let block = block_builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); diff --git a/substrate/client/peerset/src/lib.rs b/substrate/client/peerset/src/lib.rs index 1efb21dd5389ef06f2358ee6f82de769fd281d22..398d31c78b21df2a20734c0df6e5b4f4321f342f 100644 --- a/substrate/client/peerset/src/lib.rs +++ b/substrate/client/peerset/src/lib.rs @@ -34,13 +34,17 @@ mod peersstate; -use std::{collections::HashSet, collections::VecDeque}; use futures::prelude::*; use log::{debug, error, trace}; use serde_json::json; -use std::{collections::HashMap, pin::Pin, task::{Context, Poll}, time::Duration}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ collections::{HashMap, HashSet, VecDeque}, pin::Pin, task::{Context, Poll}, time::Duration, }; use wasm_timer::{Delay, Instant}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; pub use libp2p::PeerId; @@ -262,23 +266,22 @@ impl Peerset { pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { let (tx, rx) = tracing_unbounded("mpsc_peerset_messages");
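Stepping back from the diff for a moment: the version negotiation reformatted in the sc-offchain hunks above is, at heart, a three-row decision table — prefer API v2, fall back to v1, and treat everything else as "no offchain workers". A self-contained toy sketch of that dispatch pattern (the function name and the bare `Result<bool, ()>` probes are illustrative stand-ins, not the real `sp_api` calls):

```rust
// Toy model of the offchain-worker version dispatch: probe the newest API
// version first, fall back to the older one, and use 0 to mean "skip".
fn detect_version(has_v1: Result<bool, ()>, has_v2: Result<bool, ()>) -> u32 {
    match (has_v1, has_v2) {
        (_, Ok(true)) => 2, // v2 wins whenever the runtime advertises it
        (Ok(true), _) => 1, // otherwise fall back to v1
        _ => 0,             // unsupported: the real code logs an error and bails out
    }
}

fn main() {
    assert_eq!(detect_version(Ok(true), Ok(true)), 2);
    assert_eq!(detect_version(Ok(true), Ok(false)), 1);
    assert_eq!(detect_version(Err(()), Err(())), 0);
}
```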
- let handle = PeersetHandle { - tx: tx.clone(), - }; + let handle = PeersetHandle { tx: tx.clone() }; let mut peerset = { let now = Instant::now(); Peerset { - data: peersstate::PeersState::new(config.sets.iter().map(|set| peersstate::SetConfig { - in_peers: set.in_peers, - out_peers: set.out_peers, + data: peersstate::PeersState::new(config.sets.iter().map(|set| { + peersstate::SetConfig { in_peers: set.in_peers, out_peers: set.out_peers } })), tx, rx, - reserved_nodes: config.sets.iter().map(|set| { - (set.reserved_nodes.clone(), set.reserved_only) - }).collect(), + reserved_nodes: config + .sets + .iter() + .map(|set| (set.reserved_nodes.clone(), set.reserved_only)) + .collect(), message_queue: VecDeque::new(), created: now, latest_time_update: now, @@ -310,7 +313,7 @@ impl Peerset { fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id.clone()); if !newly_inserted { - return; + return } self.data.add_no_slot_node(set_id.0, peer_id); @@ -319,34 +322,36 @@ impl Peerset { fn on_remove_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { if !self.reserved_nodes[set_id.0].0.remove(&peer_id) { - return; + return } self.data.remove_no_slot_node(set_id.0, &peer_id); // Nothing more to do if not in reserved-only mode. if !self.reserved_nodes[set_id.0].1 { - return; + return } // If, however, the peerset is in reserved-only mode, then the removed node needs to be // disconnected. if let peersstate::Peer::Connected(peer) = self.data.peer(set_id.0, &peer_id) { peer.disconnect(); - self.message_queue.push_back(Message::Drop { - set_id, - peer_id, - }); + self.message_queue.push_back(Message::Drop { set_id, peer_id }); } } fn on_set_reserved_peers(&mut self, set_id: SetId, peer_ids: HashSet<PeerId>) { // Determine the difference between the current group and the new list. let (to_insert, to_remove) = { - let to_insert = peer_ids.difference(&self.reserved_nodes[set_id.0].0) - .cloned().collect::<Vec<_>>(); - let to_remove = self.reserved_nodes[set_id.0].0.difference(&peer_ids) - .cloned().collect::<Vec<_>>(); + let to_insert = peer_ids + .difference(&self.reserved_nodes[set_id.0].0) + .cloned() + .collect::<Vec<_>>(); + let to_remove = self.reserved_nodes[set_id.0] + .0 + .difference(&peer_ids) + .cloned() + .collect::<Vec<_>>(); (to_insert, to_remove) }; @@ -364,20 +369,19 @@ impl Peerset { if reserved_only { // Disconnect all the nodes that aren't reserved. - for peer_id in self.data.connected_peers(set_id.0).cloned().collect::<Vec<_>>().into_iter() { + for peer_id in + self.data.connected_peers(set_id.0).cloned().collect::<Vec<_>>().into_iter() + { if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - continue; + continue } - let peer = self.data.peer(set_id.0, &peer_id).into_connected() - .expect("We are enumerating connected peers, therefore the peer is connected; qed"); + let peer = self.data.peer(set_id.0, &peer_id).into_connected().expect( + "We are enumerating connected peers, therefore the peer is connected; qed", ); peer.disconnect(); - self.message_queue.push_back(Message::Drop { - set_id, - peer_id - }); + self.message_queue.push_back(Message::Drop { set_id, peer_id }); } - } else { self.alloc_slots(set_id); } @@ -402,19 +406,19 @@ impl Peerset { fn on_remove_from_peers_set(&mut self, set_id: SetId, peer_id: PeerId) { // Don't do anything if node is reserved.
if self.reserved_nodes[set_id.0].0.contains(&peer_id) { - return; + return } match self.data.peer(set_id.0, &peer_id) { peersstate::Peer::Connected(peer) => { - self.message_queue.push_back(Message::Drop { - set_id, - peer_id: peer.peer_id().clone(), - }); + self.message_queue + .push_back(Message::Drop { set_id, peer_id: peer.peer_id().clone() }); peer.disconnect().forget_peer(); - } - peersstate::Peer::NotConnected(peer) => { peer.forget_peer(); } - peersstate::Peer::Unknown(_) => {} + }, + peersstate::Peer::NotConnected(peer) => { + peer.forget_peer(); + }, + peersstate::Peer::Unknown(_) => {}, } } @@ -428,7 +432,7 @@ impl Peerset { trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", peer_id, change.value, reputation.reputation(), change.reason ); - return; + return } debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", @@ -490,7 +494,7 @@ impl Peerset { peer_reputation.set_reputation(after); if after != 0 { - continue; + continue } drop(peer_reputation); @@ -499,15 +503,15 @@ impl Peerset { // forget it. for set_index in 0..self.data.num_sets() { match self.data.peer(set_index, &peer_id) { - peersstate::Peer::Connected(_) => {} + peersstate::Peer::Connected(_) => {}, peersstate::Peer::NotConnected(peer) => { if peer.last_connected_or_discovered() + FORGET_AFTER < now { peer.forget_peer(); } - } + }, peersstate::Peer::Unknown(_) => { // Happens if this peer does not belong to this set. - } + }, } } } @@ -531,14 +535,13 @@ impl Peerset { // remove that check. If necessary, the peerset should be refactored to give more // control over what happens in that situation. if entry.reputation() < BANNED_THRESHOLD { - break; + break } match entry.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect { - set_id, - peer_id: conn.into_peer_id() - }), + Ok(conn) => self + .message_queue + .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), Err(_) => { // An error is returned only if no slot is available. Reserved nodes are // marked in the state machine with a flag saying "doesn't occupy a slot", @@ -548,7 +551,7 @@ impl Peerset { target: "peerset", "Not enough slots to connect to reserved node" ); - } + }, } } @@ -556,7 +559,7 @@ impl Peerset { // Nothing more to do if we're in reserved mode. if self.reserved_nodes[set_id.0].1 { - return; + return } // Try to grab the next node to attempt to connect to. @@ -565,25 +568,24 @@ impl Peerset { while self.data.has_free_outgoing_slot(set_id.0) { let next = match self.data.highest_not_connected_peer(set_id.0) { Some(n) => n, - None => break + None => break, }; // Don't connect to nodes with an abysmal reputation. if next.reputation() < BANNED_THRESHOLD { - break; + break } match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect { - set_id, - peer_id: conn.into_peer_id() - }), + Ok(conn) => self + .message_queue + .push_back(Message::Connect { set_id, peer_id: conn.into_peer_id() }), Err(_) => { // This branch can only be entered if there is no free slot, which is // checked above. debug_assert!(false); - break; - } + break + }, } } } @@ -594,7 +596,6 @@ impl Peerset { /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming /// connection implicitly means `Connect`, but incoming connections aren't cancelled by /// `dropped`. 
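The `alloc_slots` hunks above encode the connection policy in two early exits: stop as soon as the best remaining candidate sits below the ban threshold, and stop when no outgoing slot is free. A minimal dependency-free sketch of that guard, with an illustrative threshold (sc-peerset's actual constant differs):

```rust
// Walk candidates from best reputation down; connect while slots remain and
// the reputation stays above the ban threshold (value is illustrative).
const BANNED_THRESHOLD: i32 = -100;

fn pick_outgoing(mut candidates: Vec<(String, i32)>, free_slots: usize) -> Vec<String> {
    candidates.sort_by_key(|&(_, rep)| std::cmp::Reverse(rep));
    candidates
        .into_iter()
        .take_while(|&(_, rep)| rep >= BANNED_THRESHOLD) // stop at banned peers
        .take(free_slots)                                // stop when slots run out
        .map(|(id, _)| id)
        .collect()
}

fn main() {
    let picked = pick_outgoing(vec![("a".into(), 10), ("b".into(), -500), ("c".into(), 0)], 2);
    assert_eq!(picked, vec!["a".to_string(), "c".to_string()]);
}
```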
- /// // Implementation note: because of concurrency issues, it is possible that we push a `Connect` // message to the output channel with a `PeerId`, and that `incoming` gets called with the same // `PeerId` before that message has been read by the user. In this situation we must not answer. @@ -606,7 +607,7 @@ if self.reserved_nodes[set_id.0].1 { if !self.reserved_nodes[set_id.0].0.contains(&peer_id) { self.message_queue.push_back(Message::Reject(index)); - return; + return } } @@ -646,7 +647,7 @@ trace!(target: "peerset", "Dropping {}: {:+} to {}", peer_id, DISCONNECT_REPUTATION_CHANGE, entry.reputation()); entry.disconnect(); - } + }, peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => error!(target: "peerset", "Received dropped() for non-connected node"), } @@ -710,10 +711,11 @@ impl Stream for Peerset { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { loop { if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)); + return Poll::Ready(Some(message)) } - if let Poll::Ready(_) = Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx) { + if let Poll::Ready(_) = Future::poll(Pin::new(&mut self.next_periodic_alloc_slots), cx) + { self.next_periodic_alloc_slots = Delay::new(Duration::new(1, 0)); for set_index in 0..self.data.num_sets() { @@ -736,8 +738,7 @@ self.on_set_reserved_peers(set_id, peer_ids), Action::SetReservedOnly(set_id, reserved) => self.on_set_reserved_only(set_id, reserved), - Action::ReportPeer(peer_id, score_diff) => - self.on_report_peer(peer_id, score_diff), + Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), Action::AddToPeersSet(sets_name, peer_id) => self.add_to_peers_set(sets_name, peer_id), Action::RemoveFromPeersSet(sets_name, peer_id) => @@ -760,9 +761,12 @@ pub enum DropReason { #[cfg(test)] mod tests { - use libp2p::PeerId; + use super::{ IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, BANNED_THRESHOLD, }; use futures::prelude::*; - use super::{PeersetConfig, Peerset, Message, IncomingIndex, ReputationChange, SetConfig, SetId, BANNED_THRESHOLD}; + use libp2p::PeerId; use std::{pin::Pin, task::Poll, thread, time::Duration}; fn assert_messages(mut peerset: Peerset, messages: Vec<Message>) -> Peerset { @@ -799,10 +803,13 @@ handle.add_reserved_peer(SetId::from(0), reserved_peer.clone()); handle.add_reserved_peer(SetId::from(0), reserved_peer2.clone()); - assert_messages(peerset, vec![ - Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer }, - Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer2 } - ]); + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer }, + Message::Connect { set_id: SetId::from(0), peer_id: reserved_peer2 }, + ], + ); } #[test] @@ -831,12 +838,15 @@ peerset.incoming(SetId::from(0), incoming2.clone(), ii2); peerset.incoming(SetId::from(0), incoming3.clone(), ii3); - assert_messages(peerset, vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, - Message::Accept(ii), - Message::Accept(ii2), - Message::Reject(ii3), - ]); + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, + Message::Accept(ii), + Message::Accept(ii2), + Message::Reject(ii3), + ], + ); } #[test] @@ -856,9 +866,7 @@ let (mut peerset, _) = Peerset::from_config(config);
peerset.incoming(SetId::from(0), incoming.clone(), ii); - assert_messages(peerset, vec![ - Message::Reject(ii), - ]); + assert_messages(peerset, vec![Message::Reject(ii)]); } #[test] @@ -881,10 +889,13 @@ peerset.add_to_peers_set(SetId::from(0), discovered.clone()); peerset.add_to_peers_set(SetId::from(0), discovered2); - assert_messages(peerset, vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, - Message::Connect { set_id: SetId::from(0), peer_id: discovered }, - ]); + assert_messages( + peerset, + vec![ + Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, + Message::Connect { set_id: SetId::from(0), peer_id: discovered }, + ], + ); } #[test] diff --git a/substrate/client/peerset/src/peersstate.rs b/substrate/client/peerset/src/peersstate.rs index 9f54a7714fd05ecea25d8cd483292d0aa37fd077..e4062bf938b3cb24bc842ea3ec68e8ce27f79e07 100644 --- a/substrate/client/peerset/src/peersstate.rs +++ b/substrate/client/peerset/src/peersstate.rs @@ -32,7 +32,10 @@ use libp2p::PeerId; use log::error; use std::{ borrow::Cow, - collections::{HashMap, HashSet, hash_map::{Entry, OccupiedEntry}}, + collections::{ + hash_map::{Entry, OccupiedEntry}, + HashMap, HashSet, + }, }; use wasm_timer::Instant; @@ -42,7 +45,6 @@ use wasm_timer::Instant; /// /// This struct is nothing more but a data structure containing a list of nodes, where each node /// has a reputation and is either connected to us or not. -/// #[derive(Debug, Clone)] pub struct PeersState { /// List of nodes that we know about. @@ -104,10 +106,7 @@ struct Node { impl Node { fn new(num_sets: usize) -> Node { - Node { - sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), - reputation: 0, - } + Node { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } } } @@ -185,25 +184,16 @@ impl PeersState { /// # Panic /// /// `set` must be within range of the sets passed to [`PeersState::new`]. - /// pub fn peer<'a>(&'a mut self, set: usize, peer_id: &'a PeerId) -> Peer<'a> { // The code below will panic anyway if this happens to be false, but this earlier assert // makes it explicit what is wrong. assert!(set < self.sets.len()); match self.nodes.get_mut(peer_id).map(|p| &p.sets[set]) { - None | Some(MembershipState::NotMember) => Peer::Unknown(UnknownPeer { - parent: self, - set, - peer_id: Cow::Borrowed(peer_id), - }), - Some(MembershipState::In) | Some(MembershipState::Out) => { - Peer::Connected(ConnectedPeer { - state: self, - set, - peer_id: Cow::Borrowed(peer_id), - }) - } + None | Some(MembershipState::NotMember) => + Peer::Unknown(UnknownPeer { parent: self, set, peer_id: Cow::Borrowed(peer_id) }), + Some(MembershipState::In) | Some(MembershipState::Out) => + Peer::Connected(ConnectedPeer { state: self, set, peer_id: Cow::Borrowed(peer_id) }), Some(MembershipState::NotConnected { .. }) => Peer::NotConnected(NotConnectedPeer { state: self, set, @@ -224,7 +214,6 @@ impl PeersState { /// # Panic /// /// `set` must be within range of the sets passed to [`PeersState::new`]. - /// // Note: this method could theoretically return a `ConnectedPeer`, but implementing that // isn't simple. pub fn connected_peers(&self, set: usize) -> impl Iterator<Item = &PeerId> { @@ -245,7 +234,6 @@ /// # Panic /// /// `set` must be within range of the sets passed to [`PeersState::new`]. - /// pub fn highest_not_connected_peer(&mut self, set: usize) -> Option<NotConnectedPeer> { // The code below will panic anyway if this happens to be false, but this earlier assert // makes it explicit what is wrong.
@@ -254,18 +242,16 @@ impl PeersState { let outcome = self .nodes .iter_mut() - .filter(|(_, Node { sets, .. })| { - match sets[set] { - MembershipState::NotMember => false, - MembershipState::In => false, - MembershipState::Out => false, - MembershipState::NotConnected { .. } => true, - } + .filter(|(_, Node { sets, .. })| match sets[set] { + MembershipState::NotMember => false, + MembershipState::In => false, + MembershipState::Out => false, + MembershipState::NotConnected { .. } => true, }) .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node); + return Some(cur_node) } } Some(to_try) @@ -273,10 +259,10 @@ impl PeersState { .map(|(peer_id, _)| peer_id.clone()); outcome.map(move |peer_id| NotConnectedPeer { - state: self, - set, - peer_id: Cow::Owned(peer_id), - }) + state: self, + set, + peer_id: Cow::Owned(peer_id), + }) } /// Returns `true` if there is a free outgoing slot available related to this set. @@ -290,14 +276,14 @@ impl PeersState { pub fn add_no_slot_node(&mut self, set: usize, peer_id: PeerId) { // Reminder: `HashSet::insert` returns false if the node was already in the set if !self.sets[set].no_slot_nodes.insert(peer_id.clone()) { - return; + return } if let Some(peer) = self.nodes.get_mut(&peer_id) { match peer.sets[set] { MembershipState::In => self.sets[set].num_in -= 1, MembershipState::Out => self.sets[set].num_out -= 1, - MembershipState::NotConnected { .. } | MembershipState::NotMember => {} + MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, } } } @@ -308,14 +294,14 @@ impl PeersState { pub fn remove_no_slot_node(&mut self, set: usize, peer_id: &PeerId) { // Reminder: `HashSet::remove` returns false if the node was already not in the set if !self.sets[set].no_slot_nodes.remove(peer_id) { - return; + return } if let Some(peer) = self.nodes.get_mut(peer_id) { match peer.sets[set] { MembershipState::In => self.sets[set].num_in += 1, MembershipState::Out => self.sets[set].num_out += 1, - MembershipState::NotConnected { .. } | MembershipState::NotMember => {} + MembershipState::NotConnected { .. } | MembershipState::NotMember => {}, } } } @@ -396,24 +382,15 @@ impl<'a> ConnectedPeer<'a> { false, "State inconsistency: disconnecting a disconnected node" ) - } + }, } } - node.sets[self.set] = MembershipState::NotConnected { - last_connected: Instant::now(), - }; + node.sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; } else { - debug_assert!( - false, - "State inconsistency: disconnecting a disconnected node" - ); + debug_assert!(false, "State inconsistency: disconnecting a disconnected node"); } - NotConnectedPeer { - state: self.state, - set: self.set, - peer_id: self.peer_id, - } + NotConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id } } /// Performs an arithmetic addition on the reputation score of that peer. @@ -425,10 +402,7 @@ impl<'a> ConnectedPeer<'a> { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = node.reputation.saturating_add(modifier); } else { - debug_assert!( - false, - "State inconsistency: add_reputation on an unknown node" - ); + debug_assert!(false, "State inconsistency: add_reputation on an unknown node"); } } @@ -436,10 +410,7 @@ impl<'a> ConnectedPeer<'a> { /// /// > **Note**: Reputation values aren't specific to a set but are global per peer. 
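The fold reshaped in `highest_not_connected_peer` above is a plain max-by-reputation scan that keeps the earlier of two equally-reputed candidates. An equivalent, dependency-free sketch of the same selection rule:

```rust
// Highest reputation wins; ties keep the candidate seen first, exactly like
// the `>=` comparison in the fold above.
fn best<'a>(peers: &'a [(&'a str, i32)]) -> Option<&'a str> {
    peers
        .iter()
        .fold(None::<&(&str, i32)>, |cur, to_try| match cur {
            Some(cur) if cur.1 >= to_try.1 => Some(cur),
            _ => Some(to_try),
        })
        .map(|(id, _)| *id)
}

fn main() {
    assert_eq!(best(&[("a", 10), ("b", 30), ("c", 30)]), Some("b"));
}
```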
pub fn reputation(&self) -> i32 { - self.state - .nodes - .get(&*self.peer_id) - .map_or(0, |p| p.reputation) + self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) } } @@ -483,8 +454,8 @@ impl<'a> NotConnectedPeer<'a> { "State inconsistency with {}; not connected after borrow", self.peer_id ); - return Instant::now(); - } + return Instant::now() + }, }; match state.sets[self.set] { @@ -492,7 +463,7 @@ impl<'a> NotConnectedPeer<'a> { _ => { error!(target: "peerset", "State inconsistency with {}", self.peer_id); Instant::now() - } + }, } } @@ -508,7 +479,7 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_out to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. if !self.state.has_free_outgoing_slot(self.set) && !is_no_slot_occupy { - return Err(self); + return Err(self) } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -517,17 +488,10 @@ impl<'a> NotConnectedPeer<'a> { self.state.sets[self.set].num_out += 1; } } else { - debug_assert!( - false, - "State inconsistency: try_outgoing on an unknown node" - ); + debug_assert!(false, "State inconsistency: try_outgoing on an unknown node"); } - Ok(ConnectedPeer { - state: self.state, - set: self.set, - peer_id: self.peer_id, - }) + Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) } /// Tries to accept the peer as an incoming connection. @@ -541,10 +505,10 @@ impl<'a> NotConnectedPeer<'a> { // Note that it is possible for num_in to be strictly superior to the max, in case we were // connected to reserved node then marked them as not reserved. - if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in - && !is_no_slot_occupy + if self.state.sets[self.set].num_in >= self.state.sets[self.set].max_in && + !is_no_slot_occupy { - return Err(self); + return Err(self) } if let Some(peer) = self.state.nodes.get_mut(&*self.peer_id) { @@ -553,27 +517,17 @@ impl<'a> NotConnectedPeer<'a> { self.state.sets[self.set].num_in += 1; } } else { - debug_assert!( - false, - "State inconsistency: try_accept_incoming on an unknown node" - ); + debug_assert!(false, "State inconsistency: try_accept_incoming on an unknown node"); } - Ok(ConnectedPeer { - state: self.state, - set: self.set, - peer_id: self.peer_id, - }) + Ok(ConnectedPeer { state: self.state, set: self.set, peer_id: self.peer_id }) } /// Returns the reputation value of the node. /// /// > **Note**: Reputation values aren't specific to a set but are global per peer. pub fn reputation(&self) -> i32 { - self.state - .nodes - .get(&*self.peer_id) - .map_or(0, |p| p.reputation) + self.state.nodes.get(&*self.peer_id).map_or(0, |p| p.reputation) } /// Sets the reputation of the peer. @@ -584,10 +538,7 @@ impl<'a> NotConnectedPeer<'a> { if let Some(node) = self.state.nodes.get_mut(&*self.peer_id) { node.reputation = value; } else { - debug_assert!( - false, - "State inconsistency: set_reputation on an unknown node" - ); + debug_assert!(false, "State inconsistency: set_reputation on an unknown node"); } } @@ -598,10 +549,8 @@ impl<'a> NotConnectedPeer<'a> { peer.sets[self.set] = MembershipState::NotMember; // Remove the peer from `self.state.nodes` entirely if it isn't a member of any set. 
- if peer.reputation == 0 && peer - .sets - .iter() - .all(|set| matches!(set, MembershipState::NotMember)) + if peer.reputation == 0 && + peer.sets.iter().all(|set| matches!(set, MembershipState::NotMember)) { self.state.nodes.remove(&*self.peer_id); } @@ -614,11 +563,7 @@ impl<'a> NotConnectedPeer<'a> { ); }; - UnknownPeer { - parent: self.state, - set: self.set, - peer_id: self.peer_id, - } + UnknownPeer { parent: self.state, set: self.set, peer_id: self.peer_id } } } @@ -641,15 +586,9 @@ impl<'a> UnknownPeer<'a> { .nodes .entry(self.peer_id.clone().into_owned()) .or_insert_with(|| Node::new(num_sets)) - .sets[self.set] = MembershipState::NotConnected { - last_connected: Instant::now(), - }; + .sets[self.set] = MembershipState::NotConnected { last_connected: Instant::now() }; - NotConnectedPeer { - state: self.parent, - set: self.set, - peer_id: self.peer_id, - } + NotConnectedPeer { state: self.parent, set: self.set, peer_id: self.peer_id } } } @@ -699,10 +638,7 @@ mod tests { #[test] fn full_slots_in() { - let mut peers_state = PeersState::new(iter::once(SetConfig { - in_peers: 1, - out_peers: 1, - })); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); @@ -717,10 +653,7 @@ mod tests { #[test] fn no_slot_node_doesnt_use_slot() { - let mut peers_state = PeersState::new(iter::once(SetConfig { - in_peers: 1, - out_peers: 1, - })); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); @@ -740,10 +673,7 @@ mod tests { #[test] fn disconnecting_frees_slot() { - let mut peers_state = PeersState::new(iter::once(SetConfig { - in_peers: 1, - out_peers: 1, - })); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id1 = PeerId::random(); let id2 = PeerId::random(); @@ -761,11 +691,7 @@ mod tests { .discover() .try_accept_incoming() .is_err()); - peers_state - .peer(0, &id1) - .into_connected() - .unwrap() - .disconnect(); + peers_state.peer(0, &id1).into_connected().unwrap().disconnect(); assert!(peers_state .peer(0, &id2) .into_not_connected() @@ -776,41 +702,21 @@ mod tests { #[test] fn highest_not_connected_peer() { - let mut peers_state = PeersState::new(iter::once(SetConfig { - in_peers: 25, - out_peers: 25, - })); + let mut peers_state = + PeersState::new(iter::once(SetConfig { in_peers: 25, out_peers: 25 })); let id1 = PeerId::random(); let id2 = PeerId::random(); assert!(peers_state.highest_not_connected_peer(0).is_none()); - peers_state - .peer(0, &id1) - .into_unknown() - .unwrap() - .discover() - .set_reputation(50); - peers_state - .peer(0, &id2) - .into_unknown() - .unwrap() - .discover() - .set_reputation(25); + peers_state.peer(0, &id1).into_unknown().unwrap().discover().set_reputation(50); + peers_state.peer(0, &id2).into_unknown().unwrap().discover().set_reputation(25); assert_eq!( - peers_state - .highest_not_connected_peer(0) - .map(|p| p.into_peer_id()), + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1.clone()) ); - peers_state - .peer(0, &id2) - .into_not_connected() - .unwrap() - .set_reputation(75); + peers_state.peer(0, &id2).into_not_connected().unwrap().set_reputation(75); assert_eq!( - peers_state - .highest_not_connected_peer(0) - .map(|p| p.into_peer_id()), + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id2.clone()) ); peers_state @@ -820,46 +726,25 @@ mod tests { 
.try_accept_incoming() .unwrap(); assert_eq!( - peers_state - .highest_not_connected_peer(0) - .map(|p| p.into_peer_id()), + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1.clone()) ); - peers_state - .peer(0, &id1) - .into_not_connected() - .unwrap() - .set_reputation(100); - peers_state - .peer(0, &id2) - .into_connected() - .unwrap() - .disconnect(); + peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(100); + peers_state.peer(0, &id2).into_connected().unwrap().disconnect(); assert_eq!( - peers_state - .highest_not_connected_peer(0) - .map(|p| p.into_peer_id()), + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id1.clone()) ); - peers_state - .peer(0, &id1) - .into_not_connected() - .unwrap() - .set_reputation(-100); + peers_state.peer(0, &id1).into_not_connected().unwrap().set_reputation(-100); assert_eq!( - peers_state - .highest_not_connected_peer(0) - .map(|p| p.into_peer_id()), + peers_state.highest_not_connected_peer(0).map(|p| p.into_peer_id()), Some(id2.clone()) ); } #[test] fn disconnect_no_slot_doesnt_panic() { - let mut peers_state = PeersState::new(iter::once(SetConfig { - in_peers: 1, - out_peers: 1, - })); + let mut peers_state = PeersState::new(iter::once(SetConfig { in_peers: 1, out_peers: 1 })); let id = PeerId::random(); peers_state.add_no_slot_node(0, id.clone()); let peer = peers_state diff --git a/substrate/client/peerset/tests/fuzz.rs b/substrate/client/peerset/tests/fuzz.rs index 96d1a48683f184dabc57ef2bf476cc960520b3a8..3a9ba686ee95ce789097d2fb3214698321fc5922 100644 --- a/substrate/client/peerset/tests/fuzz.rs +++ b/substrate/client/peerset/tests/fuzz.rs @@ -18,10 +18,18 @@ use futures::prelude::*; use libp2p::PeerId; -use rand::distributions::{Distribution, Uniform, WeightedIndex}; -use rand::seq::IteratorRandom; -use sc_peerset::{DropReason, IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId}; -use std::{collections::HashMap, collections::HashSet, pin::Pin, task::Poll}; +use rand::{ distributions::{Distribution, Uniform, WeightedIndex}, seq::IteratorRandom, }; +use sc_peerset::{ DropReason, IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, SetConfig, SetId, }; +use std::{ collections::{HashMap, HashSet}, pin::Pin, task::Poll, }; #[test] fn run() { @@ -40,30 +48,28 @@ fn test_once() { let mut reserved_nodes = HashSet::<PeerId>::new(); let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { - sets: vec![ - SetConfig { - bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + sets: vec![SetConfig { + bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + id + }) + .collect(), + reserved_nodes: { (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) .map(|_| { let id = PeerId::random(); known_nodes.insert(id.clone()); + reserved_nodes.insert(id.clone()); id }) - .collect(), - reserved_nodes: { - (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) - .map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - reserved_nodes.insert(id.clone()); - id - }) - .collect() - }, - in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + .collect() }, + in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + reserved_only:
Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + }], }); futures::executor::block_on(futures::future::poll_fn(move |cx| { @@ -81,33 +87,28 @@ fn test_once() { for _ in 0..2500 { // Each of these weights corresponds to an action that we may perform. let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; - match WeightedIndex::new(&action_weights) - .unwrap() - .sample(&mut rng) - { + match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { // If we generate 0, poll the peerset. 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { Poll::Ready(Some(Message::Connect { peer_id, .. })) => { - if let Some(id) = incoming_nodes - .iter() - .find(|(_, v)| **v == peer_id) - .map(|(&id, _)| id) + if let Some(id) = + incoming_nodes.iter().find(|(_, v)| **v == peer_id).map(|(&id, _)| id) { incoming_nodes.remove(&id); } assert!(connected_nodes.insert(peer_id)); - } + }, Poll::Ready(Some(Message::Drop { peer_id, .. })) => { connected_nodes.remove(&peer_id); - } + }, Poll::Ready(Some(Message::Accept(n))) => { assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())) - } + }, Poll::Ready(Some(Message::Reject(n))) => { assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())) - } + }, Poll::Ready(None) => panic!(), - Poll::Pending => {} + Poll::Pending => {}, }, // If we generate 1, discover a new node. @@ -115,32 +116,29 @@ fn test_once() { let new_id = PeerId::random(); known_nodes.insert(new_id.clone()); peerset.add_to_peers_set(SetId::from(0), new_id); - } + }, // If we generate 2, adjust a random reputation. - 2 => { + 2 => if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::MIN, i32::MAX) - .sample(&mut rng); + let val = Uniform::new_inclusive(i32::MIN, i32::MAX).sample(&mut rng); peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); - } - } + }, // If we generate 3, disconnect from a random node. - 3 => { + 3 => if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { connected_nodes.remove(&id); peerset.dropped(SetId::from(0), id, DropReason::Unknown); - } - } + }, // If we generate 4, connect to a random node. 4 => { if let Some(id) = known_nodes .iter() .filter(|n| { - incoming_nodes.values().all(|m| m != *n) - && !connected_nodes.contains(*n) + incoming_nodes.values().all(|m| m != *n) && + !connected_nodes.contains(*n) }) .choose(&mut rng) { @@ -148,7 +146,7 @@ fn test_once() { incoming_nodes.insert(next_incoming_id.clone(), id.clone()); next_incoming_id.0 += 1; } - } + }, // 5 and 6 are the reserved-only mode. 5 => peerset_handle.set_reserved_only(SetId::from(0), true), @@ -156,21 +154,18 @@ fn test_once() { // 7 and 8 are about switching a random node in or out of reserved mode. 
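The weight table at the heart of the fuzzer above makes polling the peerset (action 0) by far the most common step, with reserved-mode flips kept rare. A minimal standalone illustration of that weighted sampling (rand 0.8-style API; the iteration count is arbitrary):

```rust
use rand::distributions::{Distribution, WeightedIndex};

fn main() {
    let mut rng = rand::thread_rng();
    // Same shape as the fuzzer's table: index 0 dominates, 5 and 6 are rare.
    let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4];
    let dist = WeightedIndex::new(&action_weights).unwrap();
    let mut counts = [0usize; 9];
    for _ in 0..10_000 {
        counts[dist.sample(&mut rng)] += 1;
    }
    // Polling should be sampled far more often than the rare actions.
    assert!(counts[0] > counts[5]);
}
```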
7 => { - if let Some(id) = known_nodes - .iter() - .filter(|n| !reserved_nodes.contains(*n)) - .choose(&mut rng) + if let Some(id) = + known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) { peerset_handle.add_reserved_peer(SetId::from(0), id.clone()); reserved_nodes.insert(id.clone()); } - } - 8 => { + }, + 8 => if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { reserved_nodes.remove(&id); peerset_handle.remove_reserved_peer(SetId::from(0), id); - } - } + }, _ => unreachable!(), } diff --git a/substrate/client/proposer-metrics/src/lib.rs b/substrate/client/proposer-metrics/src/lib.rs index 8fec9779de472dcad55df85fa0bb430111907fa3..da29fb2951995fd77d442d947756bc1e75d2aa00 100644 --- a/substrate/client/proposer-metrics/src/lib.rs +++ b/substrate/client/proposer-metrics/src/lib.rs @@ -18,7 +18,9 @@ //! Prometheus basic proposer metrics. -use prometheus_endpoint::{register, PrometheusError, Registry, Histogram, HistogramOpts, Gauge, U64}; +use prometheus_endpoint::{ register, Gauge, Histogram, HistogramOpts, PrometheusError, Registry, U64, }; /// Optional shareable link to basic authorship metrics. #[derive(Clone, Default)] @@ -26,13 +28,13 @@ pub struct MetricsLink(Option<Metrics>); impl MetricsLink { pub fn new(registry: Option<&Registry>) -> Self { - Self( - registry.and_then(|registry| - Metrics::register(registry) - .map_err(|err| log::warn!("Failed to register proposer prometheus metrics: {}", err)) - .ok() - ) - ) + Self(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register proposer prometheus metrics: {}", err) + }) + .ok() + })) } pub fn report<O>(&self, do_this: impl FnOnce(&Metrics) -> O) -> Option<O> { diff --git a/substrate/client/rpc-api/src/author/error.rs b/substrate/client/rpc-api/src/author/error.rs index 009a0a290d6ba9d6bec85edaf65d9d32eef60fce..0c963d4e4c2596e3dad4978a2e4be75cb895782c 100644 --- a/substrate/client/rpc-api/src/author/error.rs +++ b/substrate/client/rpc-api/src/author/error.rs @@ -32,33 +32,33 @@ pub type FutureResult<T> = Box<dyn rpc::futures::Future<Item = T, Error = Error> + Send>; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] + #[display(fmt = "Client error: {}", _0)] #[from(ignore)] Client(Box<dyn std::error::Error + Send>), /// Transaction pool error, - #[display(fmt="Transaction pool error: {}", _0)] + #[display(fmt = "Transaction pool error: {}", _0)] Pool(sc_transaction_pool_api::error::Error), /// Verification error - #[display(fmt="Extrinsic verification error: {}", _0)] + #[display(fmt = "Extrinsic verification error: {}", _0)] #[from(ignore)] Verification(Box<dyn std::error::Error + Send>), /// Incorrect extrinsic format. - #[display(fmt="Invalid extrinsic format: {}", _0)] + #[display(fmt = "Invalid extrinsic format: {}", _0)] BadFormat(codec::Error), /// Incorrect seed phrase. - #[display(fmt="Invalid seed phrase/SURI")] + #[display(fmt = "Invalid seed phrase/SURI")] BadSeedPhrase, /// Key type ID has an unknown format. - #[display(fmt="Invalid key type ID format (should be of length four)")] + #[display(fmt = "Invalid key type ID format (should be of length four)")] BadKeyType, /// Key type ID has some unsupported crypto. - #[display(fmt="The crypto of key type ID is unknown")] + #[display(fmt = "The crypto of key type ID is unknown")] UnsupportedKeyType, /// Some random issue with the key store. Shouldn't happen. - #[display(fmt="The key store is unavailable")] + #[display(fmt = "The key store is unavailable")] KeyStoreUnavailable, /// Invalid session keys encoding.
- #[display(fmt="Session keys are not encoded correctly")] + #[display(fmt = "Session keys are not encoded correctly")] InvalidSessionKeys, /// Call to an unsafe RPC was denied. UnsafeRpcCalled(crate::policy::UnsafeRpcError), @@ -105,7 +105,7 @@ const POOL_UNACTIONABLE: i64 = POOL_INVALID_TX + 8; impl From<Error> for rpc::Error { fn from(e: Error) -> Self { - use sc_transaction_pool_api::error::{Error as PoolError}; + use sc_transaction_pool_api::error::Error as PoolError; match e { Error::BadFormat(e) => rpc::Error { diff --git a/substrate/client/rpc-api/src/author/hash.rs b/substrate/client/rpc-api/src/author/hash.rs index 618159a8ad4d5fe730c17378b83d83d639a73b84..c4acfb819ddbb465cee0d4c99521d86c5a6bce96 100644 --- a/substrate/client/rpc-api/src/author/hash.rs +++ b/substrate/client/rpc-api/src/author/hash.rs @@ -18,8 +18,8 @@ //! Extrinsic helpers for author RPC module. +use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use serde::{Serialize, Deserialize}; /// RPC Extrinsic or hash /// diff --git a/substrate/client/rpc-api/src/author/mod.rs b/substrate/client/rpc-api/src/author/mod.rs index 70da73ee8a00ee2566bdf47de1117c006a8fe734..dbf729ea18adc70eab5d200ec52eee63acfd17e0 100644 --- a/substrate/client/rpc-api/src/author/mod.rs +++ b/substrate/client/rpc-api/src/author/mod.rs @@ -21,11 +21,11 @@ pub mod error; pub mod hash; +use self::error::{FutureResult, Result}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_core::Bytes; use sc_transaction_pool_api::TransactionStatus; -use self::error::{FutureResult, Result}; +use sp_core::Bytes; pub use self::gen_client::Client as AuthorClient; @@ -41,12 +41,7 @@ pub trait AuthorApi<Hash, BlockHash> { /// Insert a key into the keystore. #[rpc(name = "author_insertKey")] - fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) -> Result<()>; + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()>; /// Generate new session keys and returns the corresponding public keys. #[rpc(name = "author_rotateKeys")] @@ -72,8 +67,9 @@ pub trait AuthorApi<Hash, BlockHash> { /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. #[rpc(name = "author_removeExtrinsic")] - fn remove_extrinsic(&self, - bytes_or_hash: Vec<hash::ExtrinsicOrHash<Hash>> + fn remove_extrinsic( + &self, + bytes_or_hash: Vec<hash::ExtrinsicOrHash<Hash>>, ) -> Result<Vec<Hash>>; /// Submit an extrinsic to watch. @@ -85,10 +81,11 @@ subscribe, name = "author_submitAndWatchExtrinsic" )] - fn watch_extrinsic(&self, + fn watch_extrinsic( + &self, metadata: Self::Metadata, subscriber: Subscriber<TransactionStatus<Hash, BlockHash>>, - bytes: Bytes + bytes: Bytes, ); /// Unsubscribe from extrinsic watching. @@ -97,8 +94,9 @@ unsubscribe, name = "author_unwatchExtrinsic" )] - fn unwatch_extrinsic(&self, + fn unwatch_extrinsic( + &self, metadata: Option<Self::Metadata>, - id: SubscriptionId + id: SubscriptionId, ) -> Result<bool>; } diff --git a/substrate/client/rpc-api/src/chain/error.rs b/substrate/client/rpc-api/src/chain/error.rs index 59a0c0a2f840f222e2f81a4c4754462290fb4a2b..9bedd328d0015b7df6263793095501e09efc2733 100644 --- a/substrate/client/rpc-api/src/chain/error.rs +++ b/substrate/client/rpc-api/src/chain/error.rs @@ -31,7 +31,7 @@ pub type FutureResult<T> = Box<dyn rpc::futures::Future<Item = T, Error = Error> + Send>; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] + #[display(fmt = "Client error: {}", _0)] Client(Box<dyn std::error::Error + Send>), /// Other error type.
Other(String), diff --git a/substrate/client/rpc-api/src/chain/mod.rs b/substrate/client/rpc-api/src/chain/mod.rs index 5e2d4844130478b131c3875e0b8829240a3333e5..79ae80d0c4d1d34c5795d673d0590a2da154d531 100644 --- a/substrate/client/rpc-api/src/chain/mod.rs +++ b/substrate/client/rpc-api/src/chain/mod.rs @@ -20,11 +20,11 @@ pub mod error; +use self::error::{FutureResult, Result}; use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; -use self::error::{FutureResult, Result}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; pub use self::gen_client::Client as ChainClient; diff --git a/substrate/client/rpc-api/src/child_state/mod.rs b/substrate/client/rpc-api/src/child_state/mod.rs index 99990017fd826d4e75cd7f64e3e3aeb312e96a44..7abda0a63134df41f0269b10a065012d2590ffc8 100644 --- a/substrate/client/rpc-api/src/child_state/mod.rs +++ b/substrate/client/rpc-api/src/child_state/mod.rs @@ -18,9 +18,9 @@ //! Substrate state API. -use jsonrpc_derive::rpc; -use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData}; use crate::state::error::FutureResult; +use jsonrpc_derive::rpc; +use sp_core::storage::{PrefixedStorageKey, StorageData, StorageKey}; pub use self::gen_client::Client as ChildStateClient; use crate::state::ReadProof; @@ -41,7 +41,7 @@ pub trait ChildStateApi<Hash> { &self, child_storage_key: PrefixedStorageKey, prefix: StorageKey, - hash: Option<Hash> + hash: Option<Hash>, ) -> FutureResult<Vec<StorageKey>>; /// Returns the keys with prefix from a child storage with pagination support. @@ -63,7 +63,7 @@ &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option<Hash> + hash: Option<Hash>, ) -> FutureResult<Option<StorageData>>; /// Returns the hash of a child storage entry at a block's state. @@ -72,7 +72,7 @@ &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option<Hash> + hash: Option<Hash>, ) -> FutureResult<Option<Hash>>; /// Returns the size of a child storage entry at a block's state. @@ -81,7 +81,7 @@ &self, child_storage_key: PrefixedStorageKey, key: StorageKey, - hash: Option<Hash> + hash: Option<Hash>, ) -> FutureResult<Option<u64>>; /// Returns proof of storage for child key entries at a specific block's state. diff --git a/substrate/client/rpc-api/src/helpers.rs b/substrate/client/rpc-api/src/helpers.rs index e85c26062b50d24e8a195cacc051f282d56d8825..bb37cfbbb780e822fa0a0dbc99b587a617a10add 100644 --- a/substrate/client/rpc-api/src/helpers.rs +++ b/substrate/client/rpc-api/src/helpers.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use jsonrpc_core::futures::prelude::*; use futures::{channel::oneshot, compat::Compat}; +use jsonrpc_core::futures::prelude::*; /// Wraps around `oneshot::Receiver` and adjusts the error type to produce an internal error if the /// sender gets dropped.
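The helper documented at the end of the rpc-api hunk above exists so that a producer dropping its end of a oneshot channel surfaces as an internal error rather than a hang or panic. A rough, self-contained sketch of that adaptation (futures 0.3 only; the real helper additionally converts through `Compat` to the jsonrpc future type):

```rust
use futures::channel::oneshot;

// Map a cancelled oneshot (sender dropped) onto an error value instead of
// propagating `Canceled` up to the RPC layer.
async fn receive_or_error(rx: oneshot::Receiver<u32>) -> Result<u32, String> {
    rx.await.map_err(|_| "sender dropped; report as internal error".to_string())
}

fn main() {
    let (tx, rx) = oneshot::channel();
    drop(tx); // simulate the responder going away
    assert!(futures::executor::block_on(receive_or_error(rx)).is_err());
}
```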
diff --git a/substrate/client/rpc-api/src/lib.rs b/substrate/client/rpc-api/src/lib.rs index 814319add2a3e6b9845d60ce4abb1ab57c1f12c3..488ae429c1f429f125cc1625105b3d5fb5a9b201 100644 --- a/substrate/client/rpc-api/src/lib.rs +++ b/substrate/client/rpc-api/src/lib.rs @@ -34,7 +34,7 @@ pub use policy::DenyUnsafe; pub mod author; pub mod chain; +pub mod child_state; pub mod offchain; pub mod state; -pub mod child_state; pub mod system; diff --git a/substrate/client/rpc-api/src/metadata.rs b/substrate/client/rpc-api/src/metadata.rs index efe090acc621ed57bb11f8c0b1d021d523f504a4..bda7b8f7ba36ba339c231f9c1b16f7f7cca1e5c2 100644 --- a/substrate/client/rpc-api/src/metadata.rs +++ b/substrate/client/rpc-api/src/metadata.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use jsonrpc_core::futures::sync::mpsc; -use jsonrpc_pubsub::{Session, PubSubMetadata}; +use jsonrpc_pubsub::{PubSubMetadata, Session}; /// RPC Metadata. /// @@ -42,9 +42,7 @@ impl PubSubMetadata for Metadata { impl Metadata { /// Create new `Metadata` with session (Pub/Sub) support. pub fn new(transport: mpsc::Sender<String>) -> Self { - Metadata { - session: Some(Arc::new(Session::new(transport))), - } + Metadata { session: Some(Arc::new(Session::new(transport))) } } /// Create new `Metadata` for tests. diff --git a/substrate/client/rpc-api/src/offchain/error.rs b/substrate/client/rpc-api/src/offchain/error.rs index f74d419e54424431daf81fe2a3a9bb310a6178b2..f2567707bc5f2d6fe393e1eb8c5bea60dc4d98fb 100644 --- a/substrate/client/rpc-api/src/offchain/error.rs +++ b/substrate/client/rpc-api/src/offchain/error.rs @@ -27,7 +27,7 @@ pub type Result = std::result::Result; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { /// Unavailable storage kind error. - #[display(fmt="This storage kind is not available yet.")] + #[display(fmt = "This storage kind is not available yet.")] UnavailableStorageKind, /// Call to an unsafe RPC was denied. UnsafeRpcCalled(crate::policy::UnsafeRpcError), @@ -50,7 +50,7 @@ impl From<Error> for rpc::Error { match e { Error::UnavailableStorageKind => rpc::Error { code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: "This storage kind is not available yet" .into(), + message: "This storage kind is not available yet".into(), data: None, }, Error::UnsafeRpcCalled(e) => e.into(), diff --git a/substrate/client/rpc-api/src/offchain/mod.rs b/substrate/client/rpc-api/src/offchain/mod.rs index 7a1f6db9e80be12c2313312d493fb5743846456a..333892fc19c4cf25098e267a03d8cf7fe2e31044 100644 --- a/substrate/client/rpc-api/src/offchain/mod.rs +++ b/substrate/client/rpc-api/src/offchain/mod.rs @@ -20,9 +20,9 @@ pub mod error; -use jsonrpc_derive::rpc; use self::error::Result; -use sp_core::{Bytes, offchain::StorageKind}; +use jsonrpc_derive::rpc; +use sp_core::{offchain::StorageKind, Bytes}; pub use self::gen_client::Client as OffchainClient; diff --git a/substrate/client/rpc-api/src/state/error.rs b/substrate/client/rpc-api/src/state/error.rs index 4f2a2c854ae00cafc463cfe539fd6eeab9b298ee..30437246e6ea7bcb85008c9c07b52671f0144db1 100644 --- a/substrate/client/rpc-api/src/state/error.rs +++ b/substrate/client/rpc-api/src/state/error.rs @@ -31,7 +31,7 @@ pub type FutureResult<T> = Box<dyn rpc::futures::Future<Item = T, Error = Error> + Send>; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { /// Client error. - #[display(fmt="Client error: {}", _0)] + #[display(fmt = "Client error: {}", _0)] Client(Box<dyn std::error::Error + Send>), /// Provided block range couldn't be resolved to a list of blocks. #[display(fmt = "Cannot resolve a block range ['{:?}' ... '{:?}].
{}", from, to, details)] diff --git a/substrate/client/rpc-api/src/state/helpers.rs b/substrate/client/rpc-api/src/state/helpers.rs index cb7bd380afa518d9fbeff96e929411c0d5e7fdf6..718ad69ac232cebc0d981e8d5a987ba77ff4ed47 100644 --- a/substrate/client/rpc-api/src/state/helpers.rs +++ b/substrate/client/rpc-api/src/state/helpers.rs @@ -18,8 +18,8 @@ //! Substrate state API helpers. +use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use serde::{Serialize, Deserialize}; /// ReadProof struct returned by the RPC #[derive(Debug, PartialEq, Serialize, Deserialize)] diff --git a/substrate/client/rpc-api/src/state/mod.rs b/substrate/client/rpc-api/src/state/mod.rs index 0ebc553b41178d178049dbb045baf63580e61861..b3048d7bb5ff88766078aef89e4da1a336f1c617 100644 --- a/substrate/client/rpc-api/src/state/mod.rs +++ b/substrate/client/rpc-api/src/state/mod.rs @@ -21,16 +21,17 @@ pub mod error; pub mod helpers; +use self::error::FutureResult; use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_core::Bytes; -use sp_core::storage::{StorageKey, StorageData, StorageChangeSet}; +use sp_core::{ + storage::{StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; use sp_version::RuntimeVersion; -use self::error::FutureResult; -pub use self::gen_client::Client as StateClient; -pub use self::helpers::ReadProof; +pub use self::{gen_client::Client as StateClient, helpers::ReadProof}; /// Substrate state API #[rpc] @@ -45,11 +46,16 @@ pub trait StateApi { /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support. /// Returns the keys with prefix, leave empty to get all the keys. #[rpc(name = "state_getKeys")] - fn storage_keys(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + fn storage_keys(&self, prefix: StorageKey, hash: Option) + -> FutureResult>; /// Returns the keys with prefix, leave empty to get all the keys #[rpc(name = "state_getPairs")] - fn storage_pairs(&self, prefix: StorageKey, hash: Option) -> FutureResult>; + fn storage_pairs( + &self, + prefix: StorageKey, + hash: Option, + ) -> FutureResult>; /// Returns the keys with prefix with pagination support. /// Up to `count` keys will be returned. @@ -92,7 +98,7 @@ pub trait StateApi { &self, keys: Vec, block: Hash, - hash: Option + hash: Option, ) -> FutureResult>>; /// Query storage entries (by key) starting at block hash given as the second parameter. @@ -105,7 +111,11 @@ pub trait StateApi { /// Returns proof of storage entries at a specific block's state. 
#[rpc(name = "state_getReadProof")] - fn read_proof(&self, keys: Vec<StorageKey>, hash: Option<Hash>) -> FutureResult<ReadProof<Hash>>; + fn read_proof( + &self, + keys: Vec<StorageKey>, + hash: Option<Hash>, + ) -> FutureResult<ReadProof<Hash>>; /// New runtime version subscription #[pubsub( subscription = "state_runtimeVersion", subscribe, name = "state_subscribeRuntimeVersion", alias("chain_subscribeRuntimeVersion") )] - fn subscribe_runtime_version(&self, metadata: Self::Metadata, subscriber: Subscriber<RuntimeVersion>); + fn subscribe_runtime_version( + &self, + metadata: Self::Metadata, + subscriber: Subscriber<RuntimeVersion>, + ); /// Unsubscribe from runtime version subscription #[pubsub( subscription = "state_runtimeVersion", unsubscribe, name = "state_unsubscribeRuntimeVersion", alias("chain_unsubscribeRuntimeVersion") )] - fn unsubscribe_runtime_version(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool>; + fn unsubscribe_runtime_version( + &self, + metadata: Option<Self::Metadata>, + id: SubscriptionId, + ) -> RpcResult<bool>; /// New storage subscription #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")] fn subscribe_storage( - &self, metadata: Self::Metadata, subscriber: Subscriber<StorageChangeSet<Hash>>, keys: Option<Vec<StorageKey>> + &self, + metadata: Self::Metadata, + subscriber: Subscriber<StorageChangeSet<Hash>>, + keys: Option<Vec<StorageKey>>, ); /// Unsubscribe from storage subscription #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")] fn unsubscribe_storage( - &self, metadata: Option<Self::Metadata>, id: SubscriptionId + &self, + metadata: Option<Self::Metadata>, + id: SubscriptionId, ) -> RpcResult<bool>; /// The `state_traceBlock` RPC provides a way to trace the re-execution of a single /// block, collecting Spans and Events from both the client and the relevant WASM runtime. /// The Spans and Events are conceptually equivalent to those from the [Tracing][1] crate. /// /// The structure of the traces follows that of the block execution pipeline, so meaningful /// interpretation of the traces requires an understanding of the Substrate chain's block /// execution. /// /// [Link to conceptual map of trace structure for Polkadot and Kusama block execution.][2] /// /// [1]: https://crates.io/crates/tracing /// [2]: https://docs.google.com/drawings/d/1vZoJo9jaXlz0LmrdTOgHck9_1LsfuQPRmTr-5g1tOis/edit?usp=sharing /// /// ## Node requirements /// /// - Fully synced archive node (i.e. a node that is not actively doing a "major" sync). /// - [Tracing enabled WASM runtimes](#creating-tracing-enabled-wasm-runtimes) for all runtime versions /// for which tracing is desired. /// /// ## Node recommendations /// /// - Use fast SSD disk storage. /// - Run node flags to increase DB read speed (i.e. `--state-cache-size`, `--db-cache`). /// /// ## Creating tracing enabled WASM runtimes /// /// - Checkout commit of chain version to compile with WASM traces /// - [diener][1] can help to peg commit of substrate to what the chain expects. /// - Navigate to the `runtime` folder/package of the chain /// - Add feature `with-tracing = ["frame-executive/with-tracing", "sp-io/with-tracing"]` /// under `[features]` to the `runtime` packages' `Cargo.toml`. /// - Compile the runtime with `cargo build --release --features with-tracing` /// - Tracing-enabled WASM runtime should be found in /// `./target/release/wbuild/{{chain}}-runtime` and be called something like /// `{{your_chain}}_runtime.compact.wasm`. This can be /// renamed/modified however you like, as long as it retains the `.wasm` extension. /// - Run the node with the wasm blob overrides by placing them in a folder with all your runtimes, /// and passing the path of this folder to your chain, e.g.: - /// - `./target/release/polkadot --wasm-runtime-overrides /home/user/my-custom-wasm-runtimes` + /// - `./target/release/polkadot --wasm-runtime-overrides /home/user/my-custom-wasm-runtimes` /// /// You can also find some pre-built tracing enabled wasm runtimes in [substrate-archive][2] /// /// [Submit an issue][3] if you encounter failures when running the latest tracing enabled wasm /// runtime against an old block. /// /// [1]: https://crates.io/crates/diener /// [2]: https://github.com/paritytech/substrate-archive/tree/master/wasm-tracing /// [3]: https://github.com/paritytech/substrate-archive/issues /// /// ## RPC Usage /// /// The RPC allows for two filtering mechanisms: tracing targets and storage key prefixes. /// The filtering of spans and events takes place after they are all collected; so while filters /// do not reduce time for actual block re-execution, they reduce the response payload size. /// /// Note: storage events primarily provide information about the keys accessed during execution. /// /// ### `curl` example /// /// - Get tracing spans and events /// ```text /// curl \ /// -H "Content-Type: application/json" \ /// -d '{"id":1, "jsonrpc":"2.0", "method": "state_traceBlock", \ - /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264"]}' \ + /// "params": ["0xb246acf1adea1f801ce15c77a5fa7d8f2eb8fed466978bcee172cc02cf64e264"]}' \ /// http://localhost:9933/ /// ``` /// @@ -207,33 +230,34 @@ /// /// - `block_hash` (param index 0): Hash of the block to trace. /// - `targets` (param index 1): String of comma separated (no spaces) targets. Specified - /// targets match with trace targets by prefix (i.e if a target is in the beginning - /// of a trace target it is considered a match). If an empty string is specified no - /// targets will be filtered out.
The majority of targets correspond to Rust module names, + /// and the ones that do not are typically "hardcoded" into span or event location + /// somewhere in the Substrate source code. ("Non-hardcoded" targets typically come from frame + /// support macros.) /// - `storage_keys` (param index 2): String of comma separated (no spaces) hex encoded - /// (no `0x` prefix) storage keys. If an empty string is specified no events will - /// be filtered out. If anything other than an empty string is specified, events - /// will be filtered by storage key (so non-storage events will **not** show up). - /// You can specify any length of a storage key prefix (i.e. if a specified storage - /// key is in the beginning of an events storage key it is considered a match). - /// Example: for balance tracking on Polkadot & Kusama you would likely want - /// to track changes to account balances with the frame_system::Account storage item, - /// which is a map from `AccountId` to `AccountInfo`. The key filter for this would be - /// the storage prefix for the map: - /// `26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9` - /// Additionally you would want to track the extrinsic index, which is under the - /// `:extrinsic_index` key. The key for this would be the aforementioned string as bytes - /// in hex: `3a65787472696e7369635f696e646578`. - /// The following are some resources to learn more about storage keys in substrate: - /// [substrate storage][1], [transparent keys in substrate][2], - /// [querying substrate storage via rpc][3]. - /// - /// [1]: https://substrate.dev/docs/en/knowledgebase/advanced/storage#storage-map-key - /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ - /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ + /// (no `0x` prefix) storage keys. If an empty string is specified no events will + /// be filtered out. If anything other than an empty string is specified, events + /// will be filtered by storage key (so non-storage events will **not** show up). + /// You can specify any length of a storage key prefix (i.e. if a specified storage + /// key is in the beginning of an events storage key it is considered a match). + /// Example: for balance tracking on Polkadot & Kusama you would likely want + /// to track changes to account balances with the frame_system::Account storage item, + /// which is a map from `AccountId` to `AccountInfo`. The key filter for this would be + /// the storage prefix for the map: + /// `26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9` + /// + /// Additionally you would want to track the extrinsic index, which is under the + /// `:extrinsic_index` key. The key for this would be the aforementioned string as bytes + /// in hex: `3a65787472696e7369635f696e646578`. + /// The following are some resources to learn more about storage keys in substrate: + /// [substrate storage][1], [transparent keys in substrate][2], + /// [querying substrate storage via rpc][3]. 
+ /// + /// [1]: https://substrate.dev/docs/en/knowledgebase/advanced/storage#storage-map-key + /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ + /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// /// ### Maximum payload size /// diff --git a/substrate/client/rpc-api/src/system/error.rs b/substrate/client/rpc-api/src/system/error.rs index a0dfd863ce3aa34d9f055a949ca4903c77fa612f..b16a7abb6ea52f57c0c983aa0b59636e18117331 100644 --- a/substrate/client/rpc-api/src/system/error.rs +++ b/substrate/client/rpc-api/src/system/error.rs @@ -48,10 +48,10 @@ impl From for rpc::Error { data: serde_json::to_value(h).ok(), }, Error::MalformattedPeerArg(ref e) => rpc::Error { - code :rpc::ErrorCode::ServerError(BASE_ERROR + 2), + code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), message: e.clone(), data: None, - } + }, } } } diff --git a/substrate/client/rpc-api/src/system/helpers.rs b/substrate/client/rpc-api/src/system/helpers.rs index c8124d9c6752692d3a9f054173f70a6cb817ca16..96e8aeb1ae3d79cd01c7e5973464d8d1600dc6a0 100644 --- a/substrate/client/rpc-api/src/system/helpers.rs +++ b/substrate/client/rpc-api/src/system/helpers.rs @@ -18,9 +18,9 @@ //! Substrate system API helpers. +use sc_chain_spec::{ChainType, Properties}; +use serde::{Deserialize, Serialize}; use std::fmt; -use serde::{Serialize, Deserialize}; -use sc_chain_spec::{Properties, ChainType}; /// Running node's static details. #[derive(Clone, Debug)] @@ -53,9 +53,7 @@ pub struct Health { impl fmt::Display for Health { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { - "syncing" - } else { "idle" }) + write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { "syncing" } else { "idle" }) } } @@ -107,7 +105,8 @@ mod tests { peers: 1, is_syncing: false, should_have_peers: true, - }).unwrap(), + }) + .unwrap(), r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, ); } @@ -120,7 +119,8 @@ mod tests { roles: "a".into(), best_hash: 5u32, best_number: 6u32, - }).unwrap(), + }) + .unwrap(), r#"{"peerId":"2","roles":"a","bestHash":5,"bestNumber":6}"#, ); } @@ -132,7 +132,8 @@ mod tests { starting_block: 12u32, current_block: 50u32, highest_block: Some(128u32), - }).unwrap(), + }) + .unwrap(), r#"{"startingBlock":12,"currentBlock":50,"highestBlock":128}"#, ); @@ -141,7 +142,8 @@ mod tests { starting_block: 12u32, current_block: 50u32, highest_block: None, - }).unwrap(), + }) + .unwrap(), r#"{"startingBlock":12,"currentBlock":50}"#, ); } diff --git a/substrate/client/rpc-api/src/system/mod.rs b/substrate/client/rpc-api/src/system/mod.rs index e820fb2e702e3766b9426d797667e5154f7d5be9..2f9ed45cd2e25970e4a732b7c075ac754957d190 100644 --- a/substrate/client/rpc-api/src/system/mod.rs +++ b/substrate/client/rpc-api/src/system/mod.rs @@ -22,13 +22,15 @@ pub mod error; pub mod helpers; use crate::helpers::Receiver; +use futures::{compat::Compat, future::BoxFuture}; use jsonrpc_derive::rpc; -use futures::{future::BoxFuture, compat::Compat}; use self::error::Result as SystemResult; -pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole, SyncState}; -pub use self::gen_client::Client as SystemClient; +pub use self::{ gen_client::Client as SystemClient, helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, }; /// Substrate system RPC API #[rpc] pub trait SystemApi<Hash, Number> { @@ -74,8 +76,9 @@ /// Returns currently connected peers #[rpc(name = "system_peers", returns = "Vec<PeerInfo<Hash, Number>>")] - fn system_peers(&self) -
-> Compat>>>>; + fn system_peers( + &self, + ) -> Compat>>>>; /// Returns current state of the network. /// @@ -84,8 +87,9 @@ pub trait SystemApi { // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 // https://github.com/paritytech/substrate/issues/5541 #[rpc(name = "system_unstable_networkState", returns = "jsonrpc_core::Value")] - fn system_network_state(&self) - -> Compat>>; + fn system_network_state( + &self, + ) -> Compat>>; /// Adds a reserved peer. Returns the empty string or an error. The string /// parameter should encode a `p2p` multiaddr. @@ -93,14 +97,18 @@ pub trait SystemApi { /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` /// is an example of a valid, passing multiaddr with PeerId attached. #[rpc(name = "system_addReservedPeer", returns = "()")] - fn system_add_reserved_peer(&self, peer: String) - -> Compat>>; + fn system_add_reserved_peer( + &self, + peer: String, + ) -> Compat>>; /// Remove a reserved peer. Returns the empty string or an error. The string /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. #[rpc(name = "system_removeReservedPeer", returns = "()")] - fn system_remove_reserved_peer(&self, peer_id: String) - -> Compat>>; + fn system_remove_reserved_peer( + &self, + peer_id: String, + ) -> Compat>>; /// Returns the list of reserved peers #[rpc(name = "system_reservedPeers", returns = "Vec")] @@ -121,11 +129,9 @@ pub trait SystemApi { /// /// `sync=debug,state=trace` #[rpc(name = "system_addLogFilter", returns = "()")] - fn system_add_log_filter(&self, directives: String) - -> Result<(), jsonrpc_core::Error>; + fn system_add_log_filter(&self, directives: String) -> Result<(), jsonrpc_core::Error>; /// Resets the log filter to Substrate defaults #[rpc(name = "system_resetLogFilter", returns = "()")] - fn system_reset_log_filter(&self) - -> Result<(), jsonrpc_core::Error>; + fn system_reset_log_filter(&self) -> Result<(), jsonrpc_core::Error>; } diff --git a/substrate/client/rpc-servers/src/lib.rs b/substrate/client/rpc-servers/src/lib.rs index c93451e5cc6780b7d16c96747af3be636dea27e0..7f14cee39f20f2483f36358ca33589f207e0a9cb 100644 --- a/substrate/client/rpc-servers/src/lib.rs +++ b/substrate/client/rpc-servers/src/lib.rs @@ -22,10 +22,10 @@ mod middleware; -use std::io; use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; use log::error; use pubsub::PubSubMetadata; +use std::io; const MEGABYTE: usize = 1024 * 1024; @@ -42,7 +42,7 @@ const HTTP_THREADS: usize = 4; pub type RpcHandler = pubsub::PubSubHandler; pub use self::inner::*; -pub use middleware::{RpcMiddleware, RpcMetrics}; +pub use middleware::{RpcMetrics, RpcMiddleware}; /// Construct rpc `IoHandler` pub fn rpc_handler( @@ -60,10 +60,12 @@ pub fn rpc_handler( let methods = serde_json::to_value(&methods) .expect("Serialization of Vec is infallible; qed"); - move |_| Ok(serde_json::json!({ - "version": 1, - "methods": methods.clone(), - })) + move |_| { + Ok(serde_json::json!({ + "version": 1, + "methods": methods.clone(), + })) + } }); io } @@ -89,17 +91,14 @@ mod inner { io: RpcHandler, maybe_max_payload_mb: Option, ) -> io::Result { - let max_request_body_size = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + let max_request_body_size = maybe_max_payload_mb + .map(|mb| mb.saturating_mul(MEGABYTE)) .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); http::ServerBuilder::new(io) .threads(thread_pool_size.unwrap_or(HTTP_THREADS)) .health_api(("/health", "system_health")) 
.allowed_hosts(hosts_filtering(cors.is_some())) - .rest_api(if cors.is_some() { - http::RestApi::Secure - } else { - http::RestApi::Unsecure - }) + .rest_api(if cors.is_some() { http::RestApi::Secure } else { http::RestApi::Unsecure }) .cors(map_cors::(cors)) .max_request_body_size(max_request_body_size) .start_http(addr) @@ -134,28 +133,32 @@ mod inner { io: RpcHandler, maybe_max_payload_mb: Option, ) -> io::Result { - let rpc_max_payload = maybe_max_payload_mb.map(|mb| mb.saturating_mul(MEGABYTE)) + let rpc_max_payload = maybe_max_payload_mb + .map(|mb| mb.saturating_mul(MEGABYTE)) .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| context.sender().into()) - .max_payload(rpc_max_payload) - .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) - .allowed_origins(map_cors(cors)) - .allowed_hosts(hosts_filtering(cors.is_some())) - .start(addr) - .map_err(|err| match err { - ws::Error::Io(io) => io, - ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), - e => { - error!("{}", e); - io::ErrorKind::Other.into() - } - }) + ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { + context.sender().into() + }) + .max_payload(rpc_max_payload) + .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) + .allowed_origins(map_cors(cors)) + .allowed_hosts(hosts_filtering(cors.is_some())) + .start(addr) + .map_err(|err| match err { + ws::Error::Io(io) => io, + ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), + e => { + error!("{}", e); + io::ErrorKind::Other.into() + }, + }) } fn map_cors From<&'a str>>( - cors: Option<&Vec> + cors: Option<&Vec>, ) -> http::DomainsValidation { - cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::>()).into() + cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::>()) + .into() } fn hosts_filtering(enable: bool) -> http::DomainsValidation { @@ -171,5 +174,4 @@ mod inner { } #[cfg(target_os = "unknown")] -mod inner { -} +mod inner {} diff --git a/substrate/client/rpc-servers/src/middleware.rs b/substrate/client/rpc-servers/src/middleware.rs index 2cbc61716c31712bfa402418e0a89d6053f0fbe1..d87c653e2b250aea11293fba44b2be7b355c5e57 100644 --- a/substrate/client/rpc-servers/src/middleware.rs +++ b/substrate/client/rpc-servers/src/middleware.rs @@ -19,13 +19,9 @@ //! Middleware for RPC requests. use jsonrpc_core::{ - Middleware as RequestMiddleware, Metadata, - Request, Response, FutureResponse, FutureOutput -}; -use prometheus_endpoint::{ - Registry, CounterVec, PrometheusError, - Opts, register, U64 + FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware, Request, Response, }; +use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; use futures::{future::Either, Future}; @@ -39,18 +35,17 @@ impl RpcMetrics { /// Create an instance of metrics pub fn new(metrics_registry: Option<&Registry>) -> Result { Ok(Self { - rpc_calls: metrics_registry.map(|r| - register( - CounterVec::new( - Opts::new( - "rpc_calls_total", - "Number of rpc calls received", - ), - &["protocol"] - )?, - r, - ) - ).transpose()?, + rpc_calls: metrics_registry + .map(|r| { + register( + CounterVec::new( + Opts::new("rpc_calls_total", "Number of rpc calls received"), + &["protocol"], + )?, + r, + ) + }) + .transpose()?, }) } } @@ -67,10 +62,7 @@ impl RpcMiddleware { /// - `metrics`: Will be used to report statistics. /// - `transport_label`: The label that is used when reporting the statistics. 
pub fn new(metrics: RpcMetrics, transport_label: &str) -> Self { - RpcMiddleware { - metrics, - transport_label: String::from(transport_label), - } + RpcMiddleware { metrics, transport_label: String::from(transport_label) } } } diff --git a/substrate/client/rpc/src/author/mod.rs b/substrate/client/rpc/src/author/mod.rs index ed7899d52480199de56172c900d213047bde7032..966959050c172cfb9f9ce0e3c9bb6196c605c9be 100644 --- a/substrate/client/rpc/src/author/mod.rs +++ b/substrate/client/rpc/src/author/mod.rs @@ -21,30 +21,33 @@ #[cfg(test)] mod tests; -use std::{sync::Arc, convert::TryInto}; use log::warn; +use std::{convert::TryInto, sync::Arc}; use sp_blockchain::HeaderBackend; -use rpc::futures::{Sink, Future, future::result}; -use futures::{StreamExt as _, compat::Compat}; -use futures::future::{ready, FutureExt, TryFutureExt}; +use codec::{Decode, Encode}; +use futures::{ + compat::Compat, + future::{ready, FutureExt, TryFutureExt}, + StreamExt as _, +}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use rpc::futures::{future::result, Future, Sink}; use sc_rpc_api::DenyUnsafe; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use codec::{Encode, Decode}; -use sp_core::Bytes; -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_api::ProvideRuntimeApi; -use sp_runtime::generic; use sc_transaction_pool_api::{ - TransactionPool, InPoolTransaction, TransactionStatus, TransactionSource, - BlockHash, TxHash, TransactionFor, error::IntoPoolError, + error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool, + TransactionSource, TransactionStatus, TxHash, }; +use sp_api::ProvideRuntimeApi; +use sp_core::Bytes; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::generic; use sp_session::SessionKeys; +use self::error::{Error, FutureResult, Result}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::author::*; -use self::error::{Error, FutureResult, Result}; /// Authoring API pub struct Author { @@ -69,13 +72,7 @@ impl Author { keystore: SyncCryptoStorePtr, deny_unsafe: DenyUnsafe, ) -> Self { - Author { - client, - pool, - subscriptions, - keystore, - deny_unsafe, - } + Author { client, pool, subscriptions, keystore, deny_unsafe } } } @@ -87,19 +84,14 @@ impl Author { const TX_SOURCE: TransactionSource = TransactionSource::External; impl AuthorApi, BlockHash
<P>
> for Author - where - P: TransactionPool + Sync + Send + 'static, - Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: SessionKeys, +where + P: TransactionPool + Sync + Send + 'static, + Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: SessionKeys, { type Metadata = crate::Metadata; - fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) -> Result<()> { + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()> { self.deny_unsafe.check_if_safe()?; let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; @@ -112,20 +104,22 @@ impl AuthorApi, BlockHash
<P>
> for Author self.deny_unsafe.check_if_safe()?; let best_block_hash = self.client.info().best_hash; - self.client.runtime_api().generate_session_keys( - &generic::BlockId::Hash(best_block_hash), - None, - ).map(Into::into).map_err(|e| Error::Client(Box::new(e))) + self.client + .runtime_api() + .generate_session_keys(&generic::BlockId::Hash(best_block_hash), None) + .map(Into::into) + .map_err(|e| Error::Client(Box::new(e))) } fn has_session_keys(&self, session_keys: Bytes) -> Result { self.deny_unsafe.check_if_safe()?; let best_block_hash = self.client.info().best_hash; - let keys = self.client.runtime_api().decode_session_keys( - &generic::BlockId::Hash(best_block_hash), - session_keys.to_vec(), - ).map_err(|e| Error::Client(Box::new(e)))? + let keys = self + .client + .runtime_api() + .decode_session_keys(&generic::BlockId::Hash(best_block_hash), session_keys.to_vec()) + .map_err(|e| Error::Client(Box::new(e)))? .ok_or_else(|| Error::InvalidSessionKeys)?; Ok(SyncCryptoStore::has_keys(&*self.keystore, &keys)) @@ -144,12 +138,15 @@ impl AuthorApi, BlockHash
<P>
> for Author Err(err) => return Box::new(result(Err(err.into()))), }; let best_block_hash = self.client.info().best_hash; - Box::new(self.pool - .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) - .compat() - .map_err(|e| e.into_pool_error() - .map(Into::into) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into())) + Box::new( + self.pool + .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) + .compat() + .map_err(|e| { + e.into_pool_error() + .map(Into::into) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) + }), ) } @@ -163,7 +160,8 @@ impl AuthorApi, BlockHash
<P>
> for Author ) -> Result>> { self.deny_unsafe.check_if_safe()?; - let hashes = bytes_or_hash.into_iter() + let hashes = bytes_or_hash + .into_iter() .map(|x| match x { hash::ExtrinsicOrHash::Hash(h) => Ok(h), hash::ExtrinsicOrHash::Extrinsic(bytes) => { @@ -173,32 +171,31 @@ impl AuthorApi, BlockHash
<P>
> for Author }) .collect::>>()?; - Ok( - self.pool - .remove_invalid(&hashes) - .into_iter() - .map(|tx| tx.hash().clone()) - .collect() - ) + Ok(self + .pool + .remove_invalid(&hashes) + .into_iter() + .map(|tx| tx.hash().clone()) + .collect()) } - fn watch_extrinsic(&self, + fn watch_extrinsic( + &self, _metadata: Self::Metadata, subscriber: Subscriber, BlockHash
<P>
>>, xt: Bytes, ) { let submit = || -> Result<_> { let best_block_hash = self.client.info().best_hash; - let dxt = TransactionFor::
<P>
::decode(&mut &xt[..]) - .map_err(error::Error::from)?; - Ok( - self.pool - .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) - .map_err(|e| e.into_pool_error() + let dxt = TransactionFor::
<P>
::decode(&mut &xt[..]).map_err(error::Error::from)?; + Ok(self + .pool + .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) + .map_err(|e| { + e.into_pool_error() .map(error::Error::from) .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) - ) - ) + })) }; let subscriptions = self.subscriptions.clone(); @@ -211,8 +208,7 @@ impl AuthorApi, BlockHash
<P>
> for Author .map(move |result| match result { Ok(watcher) => { subscriptions.add(subscriber, move |sink| { - sink - .sink_map_err(|e| log::debug!("Subscription sink failed: {:?}", e)) + sink.sink_map_err(|e| log::debug!("Subscription sink failed: {:?}", e)) .send_all(Compat::new(watcher)) .map(|_| ()) }); @@ -224,14 +220,20 @@ impl AuthorApi, BlockHash
<P>
> for Author }, }); - let res = self.subscriptions.executor() + let res = self + .subscriptions + .executor() .execute(Box::new(Compat::new(future.map(|_| Ok(()))))); if res.is_err() { warn!("Error spawning subscription RPC task."); } } - fn unwatch_extrinsic(&self, _metadata: Option, id: SubscriptionId) -> Result { + fn unwatch_extrinsic( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> Result { Ok(self.subscriptions.cancel(id)) } } diff --git a/substrate/client/rpc/src/author/tests.rs b/substrate/client/rpc/src/author/tests.rs index 0e7cb5539501d418870a39bb0b50c7add5e572f8..9da6ff8d13f6eeb2be37e671471494850338f32b 100644 --- a/substrate/client/rpc/src/author/tests.rs +++ b/substrate/client/rpc/src/author/tests.rs @@ -18,37 +18,35 @@ use super::*; -use std::{mem, sync::Arc}; use assert_matches::assert_matches; use codec::Encode; +use futures::{compat::Future01CompatExt, executor}; +use rpc::futures::Stream as _; +use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_core::{ - ed25519, sr25519, - H256, blake2_256, hexdisplay::HexDisplay, testing::{ED25519, SR25519}, + blake2_256, crypto::{CryptoTypePublicPair, Pair, Public}, + ed25519, + hexdisplay::HexDisplay, + sr25519, + testing::{ED25519, SR25519}, + H256, }; use sp_keystore::testing::KeyStore; -use rpc::futures::Stream as _; +use std::{mem, sync::Arc}; use substrate_test_runtime_client::{ - self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, Block}, - DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, + self, + runtime::{Block, Extrinsic, SessionKeys, Transfer}, + AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, }; -use sc_transaction_pool::{BasicPool, FullChainApi}; -use futures::{executor, compat::Future01CompatExt}; fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { - let tx = Transfer { - amount: Default::default(), - nonce, - from: sender.into(), - to: Default::default(), - }; + let tx = + Transfer { amount: Default::default(), nonce, from: sender.into(), to: Default::default() }; tx.into_signed_tx() } -type FullTransactionPool = BasicPool< - FullChainApi, Block>, - Block, ->; +type FullTransactionPool = BasicPool, Block>, Block>; struct TestSetup { pub client: Arc>, @@ -63,18 +61,9 @@ impl Default for TestSetup { let client = Arc::new(client_builder.set_keystore(keystore.clone()).build()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); - TestSetup { - client, - keystore, - pool, - } + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); + TestSetup { client, keystore, pool } } } @@ -100,9 +89,7 @@ fn submit_transaction_should_not_cause_error() { AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), Ok(h2) if h == h2 ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + assert!(AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err()); } #[test] @@ -115,14 +102,12 @@ fn submit_rich_transaction_should_not_cause_error() { AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), Ok(h2) if h == h2 ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + assert!(AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err()); } #[test] fn should_watch_extrinsic() { - //given + // given let setup = TestSetup::default(); let p = setup.author(); @@ -175,14 +160,18 @@ fn should_watch_extrinsic() { #[test] fn 
should_return_watch_validation_error() { - //given + // given let setup = TestSetup::default(); let p = setup.author(); let (subscriber, id_rx, _data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); // when - p.watch_extrinsic(Default::default(), subscriber, uxt(AccountKeyring::Alice, 179).encode().into()); + p.watch_extrinsic( + Default::default(), + subscriber, + uxt(AccountKeyring::Alice, 179).encode().into(), + ); // then let res = executor::block_on(id_rx.compat()).unwrap(); @@ -215,11 +204,13 @@ fn should_remove_extrinsics() { assert_eq!(setup.pool.status().ready, 3); // now remove all 3 - let removed = p.remove_extrinsic(vec![ - hash::ExtrinsicOrHash::Hash(hash3), - // Removing this one will also remove ex2 - hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), - ]).unwrap(); + let removed = p + .remove_extrinsic(vec![ + hash::ExtrinsicOrHash::Hash(hash3), + // Removing this one will also remove ex2 + hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), + ]) + .unwrap(); assert_eq!(removed.len(), 3); } @@ -235,11 +226,13 @@ fn should_insert_key() { String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), key_pair.public().0.to_vec().into(), - ).expect("Insert key"); + ) + .expect("Insert key"); let public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); - assert!(public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); + assert!(public_keys + .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); } #[test] @@ -249,14 +242,16 @@ fn should_rotate_keys() { let new_public_keys = p.rotate_keys().expect("Rotates the keys"); - let session_keys = SessionKeys::decode(&mut &new_public_keys[..]) - .expect("SessionKeys decode successfully"); + let session_keys = + SessionKeys::decode(&mut &new_public_keys[..]).expect("SessionKeys decode successfully"); let ed25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); let sr25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, SR25519).unwrap(); - assert!(ed25519_public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); - assert!(sr25519_public_keys.contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); + assert!(ed25519_public_keys + .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); + assert!(sr25519_public_keys + .contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); } #[test] @@ -264,10 +259,8 @@ fn test_has_session_keys() { let setup = TestSetup::default(); let p = setup.author(); - let non_existent_public_keys = TestSetup::default() - .author() - .rotate_keys() - .expect("Rotates the keys"); + let non_existent_public_keys = + TestSetup::default().author().rotate_keys().expect("Rotates the keys"); let public_keys = p.rotate_keys().expect("Rotates the keys"); let test_vectors = vec![ @@ -295,7 +288,8 @@ fn test_has_key() { String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), alice_key_pair.public().0.to_vec().into(), - ).expect("Insert key"); + ) + .expect("Insert key"); let bob_key_pair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); let test_vectors = vec![ @@ -310,7 +304,8 @@ fn test_has_key() { p.has_key( key, String::from_utf8(key_type.0.to_vec()).expect("Keytype is a valid string"), - ).map_err(|e| mem::discriminant(&e)), + ) + .map_err(|e| mem::discriminant(&e)), 
); } } diff --git a/substrate/client/rpc/src/chain/chain_full.rs b/substrate/client/rpc/src/chain/chain_full.rs index 9687b13d50fc785c30b168c0298bc845ed447f7a..8d0f622d1e7aa905fb1ef189d0b146fe5d6d42e4 100644 --- a/substrate/client/rpc/src/chain/chain_full.rs +++ b/substrate/client/rpc/src/chain/chain_full.rs @@ -18,16 +18,19 @@ //! Blockchain API backend for full nodes. -use std::sync::Arc; -use rpc::futures::future::result; use jsonrpc_pubsub::manager::SubscriptionManager; +use rpc::futures::future::result; +use std::sync::Arc; -use sc_client_api::{BlockchainEvents, BlockBackend}; -use sp_runtime::{generic::{BlockId, SignedBlock}, traits::{Block as BlockT}}; +use sc_client_api::{BlockBackend, BlockchainEvents}; +use sp_runtime::{ + generic::{BlockId, SignedBlock}, + traits::Block as BlockT, +}; -use super::{ChainBackend, client_err, error::FutureResult}; -use std::marker::PhantomData; +use super::{client_err, error::FutureResult, ChainBackend}; use sp_blockchain::HeaderBackend; +use std::marker::PhantomData; /// Blockchain API backend for full nodes. Reads all the data from local database. pub struct FullChain { @@ -42,15 +45,12 @@ pub struct FullChain { impl FullChain { /// Create new Chain API RPC handler. pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { - client, - subscriptions, - _phantom: PhantomData, - } + Self { client, subscriptions, _phantom: PhantomData } } } -impl ChainBackend for FullChain where +impl ChainBackend for FullChain +where Block: BlockT + 'static, Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { @@ -63,18 +63,14 @@ impl ChainBackend for FullChain whe } fn header(&self, hash: Option) -> FutureResult> { - Box::new(result(self.client - .header(BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) + Box::new(result( + self.client.header(BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err), )) } - fn block(&self, hash: Option) - -> FutureResult>> - { - Box::new(result(self.client - .block(&BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) + fn block(&self, hash: Option) -> FutureResult>> { + Box::new(result( + self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err), )) } } diff --git a/substrate/client/rpc/src/chain/chain_light.rs b/substrate/client/rpc/src/chain/chain_light.rs index a3f3db9b7116c66164f221007d4f187ec4001f70..ebca664c0f23d9f154b3dba6185034852c24a9b5 100644 --- a/substrate/client/rpc/src/chain/chain_light.rs +++ b/substrate/client/rpc/src/chain/chain_light.rs @@ -18,20 +18,20 @@ //! Blockchain API backend for light nodes. -use std::sync::Arc; use futures::{future::ready, FutureExt, TryFutureExt}; -use rpc::futures::future::{result, Future, Either}; use jsonrpc_pubsub::manager::SubscriptionManager; +use rpc::futures::future::{result, Either, Future}; +use std::sync::Arc; -use sc_client_api::light::{Fetcher, RemoteBodyRequest, RemoteBlockchain}; +use sc_client_api::light::{Fetcher, RemoteBlockchain, RemoteBodyRequest}; use sp_runtime::{ generic::{BlockId, SignedBlock}, - traits::{Block as BlockT}, + traits::Block as BlockT, }; -use super::{ChainBackend, client_err, error::FutureResult}; -use sp_blockchain::HeaderBackend; +use super::{client_err, error::FutureResult, ChainBackend}; use sc_client_api::BlockchainEvents; +use sp_blockchain::HeaderBackend; /// Blockchain API backend for light nodes. Reads all the data from local /// database, if available, or fetches it from remote node otherwise. 
@@ -54,16 +54,12 @@ impl> LightChain { remote_blockchain: Arc>, fetcher: Arc, ) -> Self { - Self { - client, - subscriptions, - remote_blockchain, - fetcher, - } + Self { client, subscriptions, remote_blockchain, fetcher } } } -impl ChainBackend for LightChain where +impl ChainBackend for LightChain +where Block: BlockT + 'static, Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, F: Fetcher + Send + Sync + 'static, @@ -86,32 +82,32 @@ impl ChainBackend for LightChain) - -> FutureResult>> - { + fn block(&self, hash: Option) -> FutureResult>> { let fetcher = self.fetcher.clone(); - let block = self.header(hash) - .and_then(move |header| match header { - Some(header) => Either::A(fetcher + let block = self.header(hash).and_then(move |header| match header { + Some(header) => Either::A( + fetcher .remote_body(RemoteBodyRequest { header: header.clone(), retry_count: Default::default(), }) .boxed() .compat() - .map(move |body| Some(SignedBlock { - block: Block::new(header, body), - justifications: None, - })) - .map_err(client_err) - ), - None => Either::B(result(Ok(None))), - }); + .map(move |body| { + Some(SignedBlock { block: Block::new(header, body), justifications: None }) + }) + .map_err(client_err), + ), + None => Either::B(result(Ok(None))), + }); Box::new(block) } diff --git a/substrate/client/rpc/src/chain/mod.rs b/substrate/client/rpc/src/chain/mod.rs index 1380927bca2f463e6d02a71f724db2b2d0a21ddb..f78188249f6fde11d21cd5d9cb7e2a4df2449090 100644 --- a/substrate/client/rpc/src/chain/mod.rs +++ b/substrate/client/rpc/src/chain/mod.rs @@ -24,33 +24,36 @@ mod chain_light; #[cfg(test)] mod tests; -use std::sync::Arc; use futures::{future, StreamExt, TryStreamExt}; use log::warn; use rpc::{ - Result as RpcResult, futures::{stream, Future, Sink, Stream}, + Result as RpcResult, }; +use std::sync::Arc; -use sc_client_api::{BlockchainEvents, light::{Fetcher, RemoteBlockchain}}; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use sc_client_api::{ + light::{Fetcher, RemoteBlockchain}, + BlockchainEvents, +}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{Block as BlockT, Header, NumberFor}, }; -use self::error::{Result, Error, FutureResult}; +use self::error::{Error, FutureResult, Result}; +use sc_client_api::BlockBackend; pub use sc_rpc_api::chain::*; use sp_blockchain::HeaderBackend; -use sc_client_api::BlockBackend; /// Blockchain backend API trait ChainBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { /// Get client reference. fn client(&self) -> &Arc; @@ -94,7 +97,7 @@ trait ChainBackend: Send + Sync + 'static .header(BlockId::number(block_num)) .map_err(client_err)? 
.map(|h| h.hash())) - } + }, } } @@ -114,9 +117,12 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().best_hash, - || self.client().import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .import_notification_stream() + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, ) } @@ -140,10 +146,13 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().best_hash, - || self.client().import_notification_stream() - .filter(|notification| future::ready(notification.is_new_best)) - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .import_notification_stream() + .filter(|notification| future::ready(notification.is_new_best)) + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, ) } @@ -167,9 +176,12 @@ trait ChainBackend: Send + Sync + 'static self.subscriptions(), subscriber, || self.client().info().finalized_hash, - || self.client().finality_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), + || { + self.client() + .finality_notification_stream() + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, ) } @@ -188,13 +200,11 @@ pub fn new_full( client: Arc, subscriptions: SubscriptionManager, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { - Chain { - backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)), - } + Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) } } /// Create new state API that works on light node. @@ -204,10 +214,10 @@ pub fn new_light>( remote_blockchain: Arc>, fetcher: Arc, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, + F: Send + Sync + 'static, { Chain { backend: Box::new(self::chain_light::LightChain::new( @@ -224,11 +234,11 @@ pub struct Chain { backend: Box>, } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> for - Chain - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +impl ChainApi, Block::Hash, Block::Header, SignedBlock> + for Chain +where + Block: BlockT + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { type Metadata = crate::Metadata; @@ -236,8 +246,7 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.header(hash) } - fn block(&self, hash: Option) -> FutureResult>> - { + fn block(&self, hash: Option) -> FutureResult>> { self.backend.block(hash) } @@ -247,12 +256,13 @@ impl ChainApi, Block::Hash, Block::Header, Signe ) -> Result>> { match number { None => self.backend.block_hash(None).map(ListOrValue::Value), - Some(ListOrValue::Value(number)) => self.backend.block_hash(Some(number)).map(ListOrValue::Value), - Some(ListOrValue::List(list)) => Ok(ListOrValue::List(list - .into_iter() - .map(|number| self.backend.block_hash(Some(number))) - .collect::>()? 
- )) + Some(ListOrValue::Value(number)) => + self.backend.block_hash(Some(number)).map(ListOrValue::Value), + Some(ListOrValue::List(list)) => Ok(ListOrValue::List( + list.into_iter() + .map(|number| self.backend.block_hash(Some(number))) + .collect::>()?, + )), } } @@ -264,7 +274,11 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.subscribe_all_heads(metadata, subscriber) } - fn unsubscribe_all_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_all_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_all_heads(metadata, id) } @@ -272,15 +286,27 @@ impl ChainApi, Block::Hash, Block::Header, Signe self.backend.subscribe_new_heads(metadata, subscriber) } - fn unsubscribe_new_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_new_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_new_heads(metadata, id) } - fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { + fn subscribe_finalized_heads( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ) { self.backend.subscribe_finalized_heads(metadata, subscriber) } - fn unsubscribe_finalized_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_finalized_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_finalized_heads(metadata, id) } } @@ -298,15 +324,14 @@ fn subscribe_headers( F: FnOnce() -> S, G: FnOnce() -> Block::Hash, ERR: ::std::fmt::Debug, - S: Stream + Send + 'static, + S: Stream + Send + 'static, { subscriptions.add(subscriber, |sink| { // send current head right at the start. - let header = client.header(BlockId::Hash(best_block_hash())) + let header = client + .header(BlockId::Hash(best_block_hash())) .map_err(client_err) - .and_then(|header| { - header.ok_or_else(|| "Best header missing.".to_owned().into()) - }) + .and_then(|header| header.ok_or_else(|| "Best header missing.".to_owned().into())) .map_err(Into::into); // send further subscriptions @@ -314,12 +339,8 @@ fn subscribe_headers( .map(|res| Ok(res)) .map_err(|e| warn!("Block notification stream error: {:?}", e)); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(header)]) - .chain(stream) - ) + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream::iter_result(vec![Ok(header)]).chain(stream)) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) }); diff --git a/substrate/client/rpc/src/chain/tests.rs b/substrate/client/rpc/src/chain/tests.rs index bb673d65ea0f2ca977cfea51706f021cbe2ea190..9bd08a1796adc80455de75549483c93e0cc93b01 100644 --- a/substrate/client/rpc/src/chain/tests.rs +++ b/substrate/client/rpc/src/chain/tests.rs @@ -17,16 +17,19 @@ // along with this program. If not, see . 
use super::*; +use crate::testing::TaskExecutor; use assert_matches::assert_matches; +use futures::{ + compat::{Future01CompatExt, Stream01CompatExt}, + executor, +}; +use sc_block_builder::BlockBuilderProvider; +use sp_rpc::list::ListOrValue; use substrate_test_runtime_client::{ prelude::*, + runtime::{Block, Header, H256}, sp_consensus::BlockOrigin, - runtime::{H256, Block, Header}, }; -use sp_rpc::list::ListOrValue; -use sc_block_builder::BlockBuilderProvider; -use futures::{executor, compat::{Future01CompatExt, Stream01CompatExt}}; -use crate::testing::TaskExecutor; #[test] fn should_return_header() { @@ -105,10 +108,7 @@ fn should_return_a_block() { } ); - assert_matches!( - api.block(Some(H256::from_low_u64_be(5)).into()).wait(), - Ok(None) - ); + assert_matches!(api.block(Some(H256::from_low_u64_be(5)).into()).wait(), Ok(None)); } #[test] @@ -121,7 +121,6 @@ fn should_return_block_hash() { Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() ); - assert_matches!( api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() @@ -154,7 +153,6 @@ fn should_return_block_hash() { ); } - #[test] fn should_return_finalized_hash() { let mut client = Arc::new(substrate_test_runtime_client::new()); @@ -193,10 +191,7 @@ fn should_notify_about_latest_block() { api.subscribe_all_heads(Default::default(), subscriber); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -223,10 +218,7 @@ fn should_notify_about_best_block() { api.subscribe_new_heads(Default::default(), subscriber); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -253,10 +245,7 @@ fn should_notify_about_finalized_block() { api.subscribe_finalized_heads(Default::default(), subscriber); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); diff --git a/substrate/client/rpc/src/lib.rs b/substrate/client/rpc/src/lib.rs index 7b3af8cb2f3281c4fee110ac11801d7c94238436..ebdec6647f43ab319a6cf8fc999b1261a166bb65 100644 --- a/substrate/client/rpc/src/lib.rs +++ b/substrate/client/rpc/src/lib.rs @@ -23,12 +23,12 @@ #![warn(missing_docs)] use futures::{compat::Future01CompatExt, FutureExt}; -use rpc::futures::future::{Executor, ExecuteError, Future}; +use rpc::futures::future::{ExecuteError, Executor, Future}; use sp_core::traits::SpawnNamed; use std::sync::Arc; -pub use sc_rpc_api::{DenyUnsafe, Metadata}; pub use rpc::IoHandlerExtension as RpcExtension; +pub use sc_rpc_api::{DenyUnsafe, Metadata}; pub mod author; pub mod chain; diff --git a/substrate/client/rpc/src/offchain/mod.rs b/substrate/client/rpc/src/offchain/mod.rs index 
dbb48a9e519342c9a8ecf42984d932d5ca91f817..9d1cc702b51e01c880bb7c9f669057125cc081ca 100644 --- a/substrate/client/rpc/src/offchain/mod.rs +++ b/substrate/client/rpc/src/offchain/mod.rs @@ -21,15 +21,15 @@ #[cfg(test)] mod tests; +use self::error::{Error, Result}; +use parking_lot::RwLock; /// Re-export the API for backward compatibility. pub use sc_rpc_api::offchain::*; use sc_rpc_api::DenyUnsafe; -use self::error::{Error, Result}; use sp_core::{ - Bytes, offchain::{OffchainStorage, StorageKind}, + Bytes, }; -use parking_lot::RwLock; use std::sync::Arc; /// Offchain API @@ -43,10 +43,7 @@ pub struct Offchain { impl Offchain { /// Create new instance of Offchain API. pub fn new(storage: T, deny_unsafe: DenyUnsafe) -> Self { - Offchain { - storage: Arc::new(RwLock::new(storage)), - deny_unsafe, - } + Offchain { storage: Arc::new(RwLock::new(storage)), deny_unsafe } } } diff --git a/substrate/client/rpc/src/offchain/tests.rs b/substrate/client/rpc/src/offchain/tests.rs index b8054d816325f4df20377bc4b7c4449ea091cb50..f9629e70198a3317e259d4daebffb0da396c8822 100644 --- a/substrate/client/rpc/src/offchain/tests.rs +++ b/substrate/client/rpc/src/offchain/tests.rs @@ -18,7 +18,7 @@ use super::*; use assert_matches::assert_matches; -use sp_core::{Bytes, offchain::storage::InMemOffchainStorage}; +use sp_core::{offchain::storage::InMemOffchainStorage, Bytes}; #[test] fn local_storage_should_work() { diff --git a/substrate/client/rpc/src/state/mod.rs b/substrate/client/rpc/src/state/mod.rs index 35680b0fa41dd51c25b36eca425c6a4560ad062a..9137404df3ee2eca8755b14ac2ba5820ab91ac78 100644 --- a/substrate/client/rpc/src/state/mod.rs +++ b/substrate/client/rpc/src/state/mod.rs @@ -24,34 +24,39 @@ mod state_light; #[cfg(test)] mod tests; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use rpc::{ + futures::{future::result, Future}, + Result as RpcResult, +}; use std::sync::Arc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use rpc::{Result as RpcResult, futures::{Future, future::result}}; -use sc_rpc_api::{DenyUnsafe, state::ReadProof}; -use sc_client_api::light::{RemoteBlockchain, Fetcher}; -use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}}; -use sp_version::RuntimeVersion; +use sc_client_api::light::{Fetcher, RemoteBlockchain}; +use sc_rpc_api::{state::ReadProof, DenyUnsafe}; +use sp_core::{ + storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; use sp_runtime::traits::Block as BlockT; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use self::error::{Error, FutureResult}; -pub use sc_rpc_api::state::*; -pub use sc_rpc_api::child_state::*; use sc_client_api::{ - ExecutorProvider, StorageProvider, BlockchainEvents, Backend, BlockBackend, ProofProvider + Backend, BlockBackend, BlockchainEvents, ExecutorProvider, ProofProvider, StorageProvider, }; -use sp_blockchain::{HeaderMetadata, HeaderBackend}; +pub use sc_rpc_api::{child_state::*, state::*}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; /// State backend API. pub trait StateBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { /// Call runtime method at given block. 
fn call( @@ -129,7 +134,7 @@ pub trait StateBackend: Send + Sync + 'static fn query_storage_at( &self, keys: Vec, - at: Option + at: Option, ) -> FutureResult>>; /// Returns proof of storage entries at a specific block's state. @@ -184,21 +189,30 @@ pub fn new_full( deny_unsafe: DenyUnsafe, rpc_max_payload: Option, ) -> (State, ChildState) - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + ProofProvider - + HeaderMetadata + BlockchainEvents - + CallApiAt + HeaderBackend - + BlockBackend + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + ProofProvider + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + HeaderBackend + + BlockBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { - let child_backend = Box::new( - self::state_full::FullState::new( - client.clone(), subscriptions.clone(), rpc_max_payload - ) - ); - let backend = Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); + let child_backend = Box::new(self::state_full::FullState::new( + client.clone(), + subscriptions.clone(), + rpc_max_payload, + )); + let backend = + Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } @@ -210,27 +224,32 @@ pub fn new_light>( fetcher: Arc, deny_unsafe: DenyUnsafe, ) -> (State, ChildState) - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider - + HeaderMetadata - + ProvideRuntimeApi + HeaderBackend + BlockchainEvents - + Send + Sync + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + HeaderMetadata + + ProvideRuntimeApi + + HeaderBackend + + BlockchainEvents + + Send + + Sync + + 'static, + F: Send + Sync + 'static, { let child_backend = Box::new(self::state_light::LightState::new( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - fetcher.clone(), + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + fetcher.clone(), )); let backend = Box::new(self::state_light::LightState::new( - client, - subscriptions, - remote_blockchain, - fetcher, + client, + subscriptions, + remote_blockchain, + fetcher, )); (State { backend, deny_unsafe }, ChildState { backend: child_backend }) } @@ -243,9 +262,9 @@ pub struct State { } impl StateApi for State - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { type Metadata = crate::Metadata; @@ -281,25 +300,35 @@ impl StateApi for State block: Option, ) -> FutureResult> { if count > STORAGE_KEYS_PAGED_MAX_COUNT { - return Box::new(result(Err( - Error::InvalidCount { - value: count, - max: STORAGE_KEYS_PAGED_MAX_COUNT, - } - ))); + return Box::new(result(Err(Error::InvalidCount { + value: count, + max: STORAGE_KEYS_PAGED_MAX_COUNT, + }))) } self.backend.storage_keys_paged(block, prefix, count, start_key) } - fn storage(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { self.backend.storage(block, key) } - fn storage_hash(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage_hash( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> 
{ self.backend.storage_hash(block, key) } - fn storage_size(&self, key: StorageKey, block: Option) -> FutureResult> { + fn storage_size( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { self.backend.storage_size(block, key) } @@ -311,7 +340,7 @@ impl StateApi for State &self, keys: Vec, from: Block::Hash, - to: Option + to: Option, ) -> FutureResult>> { if let Err(err) = self.deny_unsafe.check_if_safe() { return Box::new(result(Err(err.into()))) @@ -323,12 +352,16 @@ impl StateApi for State fn query_storage_at( &self, keys: Vec, - at: Option + at: Option, ) -> FutureResult>> { self.backend.query_storage_at(keys, at) } - fn read_proof(&self, keys: Vec, block: Option) -> FutureResult> { + fn read_proof( + &self, + keys: Vec, + block: Option, + ) -> FutureResult> { self.backend.read_proof(block, keys) } @@ -336,12 +369,16 @@ impl StateApi for State &self, meta: Self::Metadata, subscriber: Subscriber>, - keys: Option> + keys: Option>, ) { self.backend.subscribe_storage(meta, subscriber, keys); } - fn unsubscribe_storage(&self, meta: Option, id: SubscriptionId) -> RpcResult { + fn unsubscribe_storage( + &self, + meta: Option, + id: SubscriptionId, + ) -> RpcResult { self.backend.unsubscribe_storage(meta, id) } @@ -349,7 +386,11 @@ impl StateApi for State self.backend.runtime_version(at) } - fn subscribe_runtime_version(&self, meta: Self::Metadata, subscriber: Subscriber) { + fn subscribe_runtime_version( + &self, + meta: Self::Metadata, + subscriber: Subscriber, + ) { self.backend.subscribe_runtime_version(meta, subscriber); } @@ -367,9 +408,10 @@ impl StateApi for State /// Note: requires the node to run with `--rpc-methods=Unsafe`. /// Note: requires runtimes compiled with wasm tracing support, `--features with-tracing`. fn trace_block( - &self, block: Block::Hash, + &self, + block: Block::Hash, targets: Option, - storage_keys: Option + storage_keys: Option, ) -> FutureResult { if let Err(err) = self.deny_unsafe.check_if_safe() { return Box::new(result(Err(err.into()))) @@ -381,9 +423,9 @@ impl StateApi for State /// Child state backend API. pub trait ChildStateBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { /// Returns proof of storage for a child key entries at a specific block's state. 
fn read_child_proof( @@ -435,8 +477,7 @@ pub trait ChildStateBackend: Send + Sync + 'static storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.storage(block, storage_key, key) - .map(|x| x.map(|x| x.0.len() as u64))) + Box::new(self.storage(block, storage_key, key).map(|x| x.map(|x| x.0.len() as u64))) } } @@ -446,9 +487,9 @@ pub struct ChildState { } impl ChildStateApi for ChildState - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { type Metadata = crate::Metadata; @@ -465,7 +506,7 @@ impl ChildStateApi for ChildState &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage(block, storage_key, key) } @@ -474,7 +515,7 @@ impl ChildStateApi for ChildState &self, storage_key: PrefixedStorageKey, key_prefix: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_keys(block, storage_key, key_prefix) } @@ -494,7 +535,7 @@ impl ChildStateApi for ChildState &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_hash(block, storage_key, key) } @@ -503,11 +544,10 @@ impl ChildStateApi for ChildState &self, storage_key: PrefixedStorageKey, key: StorageKey, - block: Option + block: Option, ) -> FutureResult> { self.backend.storage_size(block, storage_key, key) } - } fn client_err(err: sp_blockchain::Error) -> Error { diff --git a/substrate/client/rpc/src/state/state_full.rs b/substrate/client/rpc/src/state/state_full.rs index 58209e452e818bc9e930c968b69b856b56af4111..313e89bdf80b4e15a6da9116a920b68d1759c060 100644 --- a/substrate/client/rpc/src/state/state_full.rs +++ b/substrate/client/rpc/src/state/state_full.rs @@ -18,36 +18,49 @@ //! State API backend for full nodes. 
-use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::ops::Range; use futures::{future, StreamExt as _, TryStreamExt as _}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; -use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::result}}; +use rpc::{ + futures::{future::result, stream, Future, Sink, Stream}, + Result as RpcResult, +}; +use std::{ + collections::{BTreeMap, HashMap}, + ops::Range, + sync::Arc, +}; use sc_rpc_api::state::ReadProof; use sp_blockchain::{ - Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, - HeaderBackend + CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, + Result as ClientResult, }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, - ChildInfo, ChildType, PrefixedStorageKey}, + storage::{ + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, + StorageKey, + }, + Bytes, }; -use sp_version::RuntimeVersion; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, + generic::BlockId, + traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion}, }; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; -use std::marker::PhantomData; +use super::{ + client_err, + error::{Error, FutureResult, Result}, + ChildStateBackend, StateBackend, +}; use sc_client_api::{ - Backend, BlockBackend, BlockchainEvents, CallExecutor, StorageProvider, ExecutorProvider, - ProofProvider + Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, + StorageProvider, }; +use std::marker::PhantomData; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -72,11 +85,13 @@ pub struct FullState { } impl FullState - where - BE: Backend, - Client: StorageProvider + HeaderBackend + BlockBackend - + HeaderMetadata, - Block: BlockT + 'static, +where + BE: Backend, + Client: StorageProvider + + HeaderBackend + + BlockBackend + + HeaderMetadata, + Block: BlockT + 'static, { /// Create new state API backend for full nodes. pub fn new( @@ -98,16 +113,23 @@ impl FullState fn split_query_storage_range( &self, from: Block::Hash, - to: Option + to: Option, ) -> Result> { - let to = self.block_or_best(to).map_err(|e| invalid_block::(from, to, e.to_string()))?; + let to = self + .block_or_best(to) + .map_err(|e| invalid_block::(from, to, e.to_string()))?; - let invalid_block_err = |e: ClientError| invalid_block::(from, Some(to), e.to_string()); + let invalid_block_err = + |e: ClientError| invalid_block::(from, Some(to), e.to_string()); let from_meta = self.client.header_metadata(from).map_err(invalid_block_err)?; let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?; if from_meta.number > to_meta.number { - return Err(invalid_block_range(&from_meta, &to_meta, "from number > to number".to_owned())) + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from number > to number".to_owned(), + )) } // check if we can get from `to` to `from` by going through parent_hashes. 
@@ -116,28 +138,33 @@ impl FullState let mut hashes = vec![to_meta.hash]; let mut last = to_meta.clone(); while last.number > from_number { - let header_metadata = self.client + let header_metadata = self + .client .header_metadata(last.parent) .map_err(|e| invalid_block_range::(&last, &to_meta, e.to_string()))?; hashes.push(header_metadata.hash); last = header_metadata; } if last.hash != from_meta.hash { - return Err(invalid_block_range(&from_meta, &to_meta, "from and to are on different forks".to_owned())) + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from and to are on different forks".to_owned(), + )) } hashes.reverse(); hashes }; // check if we can filter blocks-with-changes from some (sub)range using changes tries - let changes_trie_range = self.client + let changes_trie_range = self + .client .max_key_changes_range(from_number, BlockId::Hash(to_meta.hash)) .map_err(client_err)?; - let filtered_range_begin = changes_trie_range - .and_then(|(begin, _)| { - // avoids a corner case where begin < from_number (happens when querying genesis) - begin.checked_sub(&from_number).map(|x| x.saturated_into::()) - }); + let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| { + // avoids a corner case where begin < from_number (happens when querying genesis) + begin.checked_sub(&from_number).map(|x| x.saturated_into::()) + }); let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin); Ok(QueryStorageRange { @@ -158,7 +185,8 @@ impl FullState ) -> Result<()> { for block in range.unfiltered_range.start..range.unfiltered_range.end { let block_hash = range.hashes[block].clone(); - let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; + let mut block_changes = + StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; let id = BlockId::hash(block_hash); for key in keys { let (has_changed, data) = { @@ -191,30 +219,34 @@ impl FullState let (begin, end) = match range.filtered_range { Some(ref filtered_range) => ( range.first_number + filtered_range.start.saturated_into(), - BlockId::Hash(range.hashes[filtered_range.end - 1].clone()) + BlockId::Hash(range.hashes[filtered_range.end - 1].clone()), ), None => return Ok(()), }; - let mut changes_map: BTreeMap, StorageChangeSet> = BTreeMap::new(); + let mut changes_map: BTreeMap, StorageChangeSet> = + BTreeMap::new(); for key in keys { let mut last_block = None; let mut last_value = last_values.get(key).cloned().unwrap_or_default(); let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?; for (block, _) in key_changes.into_iter().rev() { if last_block == Some(block) { - continue; + continue } - let block_hash = range.hashes[(block - range.first_number).saturated_into::()].clone(); + let block_hash = + range.hashes[(block - range.first_number).saturated_into::()].clone(); let id = BlockId::Hash(block_hash); let value_at_block = self.client.storage(&id, key).map_err(client_err)?; if last_value == value_at_block { - continue; + continue } - changes_map.entry(block) + changes_map + .entry(block) .or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() }) - .changes.push((key.clone(), value_at_block.clone())); + .changes + .push((key.clone(), value_at_block.clone())); last_block = Some(block); last_value = value_at_block; } @@ -227,15 +259,22 @@ impl FullState } } -impl StateBackend for FullState where +impl StateBackend for FullState +where Block: BlockT + 'static, BE: Backend + 'static, - Client: 
ExecutorProvider + StorageProvider - + ProofProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi + Client: ExecutorProvider + + StorageProvider + + ProofProvider + + HeaderBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + BlockBackend - + Send + Sync + 'static, + + Send + + Sync + + 'static, Client::Api: Metadata, { fn call( @@ -244,19 +283,21 @@ impl StateBackend for FullState FutureResult { - let r = self.block_or_best(block) - .and_then(|block| self - .client - .executor() - .call( - &BlockId::Hash(block), - &method, - &*call_data, - self.client.execution_extensions().strategies().other, - None, - ) - .map(Into::into) - ).map_err(client_err); + let r = self + .block_or_best(block) + .and_then(|block| { + self.client + .executor() + .call( + &BlockId::Hash(block), + &method, + &*call_data, + self.client.execution_extensions().strategies().other, + None, + ) + .map(Into::into) + }) + .map_err(client_err); Box::new(result(r)) } @@ -268,7 +309,8 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| + .and_then(|block| { self.client.storage_keys_iter( - &BlockId::Hash(block), prefix.as_ref(), start_key.as_ref() + &BlockId::Hash(block), + prefix.as_ref(), + start_key.as_ref(), ) - ) + }) .map(|iter| iter.take(count as usize).collect()) - .map_err(client_err))) + .map_err(client_err), + )) } fn storage( @@ -308,7 +354,8 @@ impl StateBackend for FullState StateBackend for FullState(); if item_sum > 0 { @@ -337,7 +385,7 @@ impl StateBackend for FullState StateBackend for FullState) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .map_err(client_err) - .and_then(|block| - self.client.runtime_api().metadata(&BlockId::Hash(block)) - .map(Into::into) - .map_err(|e| Error::Client(Box::new(e)))) - )) + Box::new(result(self.block_or_best(block).map_err(client_err).and_then(|block| { + self.client + .runtime_api() + .metadata(&BlockId::Hash(block)) + .map(Into::into) + .map_err(|e| Error::Client(Box::new(e))) + }))) } fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .map_err(client_err) - .and_then(|block| - self.client.runtime_version_at(&BlockId::Hash(block)) - .map_err(|e| Error::Client(Box::new(e))) - ) - )) + Box::new(result(self.block_or_best(block).map_err(client_err).and_then(|block| { + self.client + .runtime_version_at(&BlockId::Hash(block)) + .map_err(|e| Error::Client(Box::new(e))) + }))) } fn query_storage( @@ -394,7 +439,7 @@ impl StateBackend for FullState, - at: Option + at: Option, ) -> FutureResult>> { let at = at.unwrap_or_else(|| self.client.info().best_hash); self.query_storage(at, Some(at), keys) @@ -432,14 +477,12 @@ impl StateBackend for FullState stream, Err(err) => { let _ = subscriber.reject(Error::from(client_err(err)).into()); - return; - } + return + }, }; self.subscriptions.add(subscriber, |sink| { - let version = self.runtime_version(None.into()) - .map_err(Into::into) - .wait(); + let version = self.runtime_version(None.into()).map_err(Into::into).wait(); let client = self.client.clone(); let mut previous_version = version.clone(); @@ -460,12 +503,8 @@ impl StateBackend for FullState StateBackend for FullState>, ) { let keys = Into::>>::into(keys); - let stream = match self.client.storage_changes_notification_stream( - keys.as_ref().map(|x| &**x), - None - ) { + let stream = match self + .client + 
.storage_changes_notification_stream(keys.as_ref().map(|x| &**x), None) + { Ok(stream) => stream, Err(err) => { let _ = subscriber.reject(client_err(err).into()); - return; + return }, }; // initial values - let initial = stream::iter_result(keys - .map(|keys| { + let initial = stream::iter_result( + keys.map(|keys| { let block = self.client.info().best_hash; let changes = keys .into_iter() - .map(|key| StateBackend::storage(self, Some(block.clone()).into(), key.clone()) - .map(|val| (key.clone(), val)) - .wait() - .unwrap_or_else(|_| (key, None)) - ) + .map(|key| { + StateBackend::storage(self, Some(block.clone()).into(), key.clone()) + .map(|val| (key.clone(), val)) + .wait() + .unwrap_or_else(|_| (key, None)) + }) .collect(); vec![Ok(Ok(StorageChangeSet { block, changes }))] - }).unwrap_or_default()); + }) + .unwrap_or_default(), + ); self.subscriptions.add(subscriber, |sink| { let stream = stream - .map(|(block, changes)| Ok::<_, ()>(Ok(StorageChangeSet { - block, - changes: changes.iter() - .filter_map(|(o_sk, k, v)| if o_sk.is_none() { - Some((k.clone(),v.cloned())) - } else { None }).collect(), - }))) + .map(|(block, changes)| { + Ok::<_, ()>(Ok(StorageChangeSet { + block, + changes: changes + .iter() + .filter_map(|(o_sk, k, v)| { + if o_sk.is_none() { + Some((k.clone(), v.cloned())) + } else { + None + } + }) + .collect(), + })) + }) .compat(); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) .send_all(initial.chain(stream)) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) @@ -553,21 +602,29 @@ impl StateBackend for FullState(block, None, e.to_string())) + block_executor + .trace_block() + .map_err(|e| invalid_block::(block, None, e.to_string())), )) } } -impl ChildStateBackend for FullState where +impl ChildStateBackend for FullState +where Block: BlockT + 'static, BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + Client: ExecutorProvider + + StorageProvider + ProofProvider - + HeaderBackend + BlockBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi - + Send + Sync + 'static, + + HeaderBackend + + BlockBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + + Send + + Sync + + 'static, Client::Api: Metadata, { fn read_child_proof( @@ -580,7 +637,8 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client @@ -606,16 +664,14 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; - self.client.child_storage_keys( - &BlockId::Hash(block), - &child_info, - &prefix, - ) + self.client.child_storage_keys(&BlockId::Hash(block), &child_info, &prefix) }) - .map_err(client_err))) + .map_err(client_err), + )) } fn storage_keys_paged( @@ -630,15 +686,20 @@ impl ChildStateBackend for FullState ChildInfo::new_default(storage_key), + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), None => return Err(sp_blockchain::Error::InvalidChildStorageKey), }; self.client.child_storage_keys_iter( - &BlockId::Hash(block), child_info, prefix.as_ref(), start_key.as_ref(), + &BlockId::Hash(block), 
+						child_info,
+						prefix.as_ref(),
+						start_key.as_ref(),
 					)
 				})
 				.map(|iter| iter.take(count as usize).collect())
-				.map_err(client_err)))
+				.map_err(client_err),
+		))
 	}
 
 	fn storage(
@@ -651,16 +712,14 @@ impl<BE, Block, Client> ChildStateBackend<Block> for FullState<BE, Block, Client>
 			self.block_or_best(block)
 				.and_then(|block| {
 					let child_info = match ChildType::from_prefixed_key(&storage_key) {
-						Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
+						Some((ChildType::ParentKeyId, storage_key)) =>
+							ChildInfo::new_default(storage_key),
 						None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
 					};
-					self.client.child_storage(
-						&BlockId::Hash(block),
-						&child_info,
-						&key,
-					)
+					self.client.child_storage(&BlockId::Hash(block), &child_info, &key)
 				})
-				.map_err(client_err)))
+				.map_err(client_err),
+		))
 	}
 
 	fn storage_hash(
@@ -673,23 +732,24 @@ impl<BE, Block, Client> ChildStateBackend<Block> for FullState<BE, Block, Client>
 			self.block_or_best(block)
 				.and_then(|block| {
 					let child_info = match ChildType::from_prefixed_key(&storage_key) {
-						Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
+						Some((ChildType::ParentKeyId, storage_key)) =>
+							ChildInfo::new_default(storage_key),
 						None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
 					};
-					self.client.child_storage_hash(
-						&BlockId::Hash(block),
-						&child_info,
-						&key,
-					)
+					self.client.child_storage_hash(&BlockId::Hash(block), &child_info, &key)
 				})
-				.map_err(client_err)))
+				.map_err(client_err),
+		))
 	}
 }
 
 /// Splits passed range into two subranges where:
 /// - first range has at least one element in it;
 /// - second range (optionally) starts at given `middle` element.
-pub(crate) fn split_range(size: usize, middle: Option<usize>) -> (Range<usize>, Option<Range<usize>>) {
+pub(crate) fn split_range(
+	size: usize,
+	middle: Option<usize>,
+) -> (Range<usize>, Option<Range<usize>>) {
 	// check if we can filter blocks-with-changes from some (sub)range using changes tries
 	let range2_begin = match middle {
 		// some of required changes tries are pruned => use available tries
@@ -714,21 +774,9 @@ fn invalid_block_range<B: BlockT>(
 ) -> Error {
 	let to_string = |h: &CachedHeaderMetadata<B>| format!("{} ({:?})", h.number, h.hash);
 
-	Error::InvalidBlockRange {
-		from: to_string(from),
-		to: to_string(to),
-		details,
-	}
+	Error::InvalidBlockRange { from: to_string(from), to: to_string(to), details }
 }
 
-fn invalid_block<B: BlockT>(
-	from: B::Hash,
-	to: Option<B::Hash>,
-	details: String,
-) -> Error {
-	Error::InvalidBlockRange {
-		from: format!("{:?}", from),
-		to: format!("{:?}", to),
-		details,
-	}
+fn invalid_block<B: BlockT>(from: B::Hash, to: Option<B::Hash>, details: String) -> Error {
+	Error::InvalidBlockRange { from: format!("{:?}", from), to: format!("{:?}", to), details }
 }
diff --git a/substrate/client/rpc/src/state/state_light.rs b/substrate/client/rpc/src/state/state_light.rs
index a2f69df9d02710b95a3e71460cf179f7db6bb5ba..274eabe376d988e94cad39b7b166c67b9cb982ff 100644
--- a/substrate/client/rpc/src/state/state_light.rs
+++ b/substrate/client/rpc/src/state/state_light.rs
@@ -18,45 +18,53 @@
 //! State API backend for light nodes.
-use std::{ - sync::Arc, - collections::{HashSet, HashMap, hash_map::Entry}, -}; use codec::Decode; use futures::{ - future::{ready, Either}, channel::oneshot::{channel, Sender}, - FutureExt, TryFutureExt, - StreamExt as _, TryStreamExt as _, + future::{ready, Either}, + FutureExt, StreamExt as _, TryFutureExt, TryStreamExt as _, }; use hash_db::Hasher; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId, manager::SubscriptionManager}; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; use parking_lot::Mutex; use rpc::{ + futures::{ + future::{result, Future}, + stream::Stream, + Sink, + }, Result as RpcResult, - futures::Sink, - futures::future::{result, Future}, - futures::stream::Stream, +}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, }; -use sc_rpc_api::state::ReadProof; -use sp_blockchain::{Error as ClientError, HeaderBackend}; use sc_client_api::{ - BlockchainEvents, light::{ - RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteBlockchain, Fetcher, future_header, + future_header, Fetcher, RemoteBlockchain, RemoteCallRequest, RemoteReadChildRequest, + RemoteReadRequest, }, + BlockchainEvents, }; +use sc_rpc_api::state::ReadProof; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_core::{ + storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, Bytes, OpaqueMetadata, - storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor}, }; use sp_version::RuntimeVersion; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err}; +use super::{ + client_err, + error::{Error, FutureResult}, + ChildStateBackend, StateBackend, +}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; @@ -77,11 +85,7 @@ trait SharedRequests: Clone + Send + Sync { /// Tries to listen for already issued request, or issues request. /// /// Returns true if requests has been issued. - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool; + fn listen_request(&self, block: Hash, sender: Sender>) -> bool; /// Returns (and forgets) all listeners for given request. 
fn on_response_received(&self, block: Hash) -> Vec>>; @@ -97,12 +101,10 @@ struct StorageSubscriptions { subscriptions_by_key: HashMap>, } -impl SharedRequests for Arc>> { - fn listen_request( - &self, - block: Block::Hash, - sender: Sender>, - ) -> bool { +impl SharedRequests + for Arc>> +{ + fn listen_request(&self, block: Block::Hash, sender: Sender>) -> bool { let mut subscriptions = self.lock(); let active_requests_at = subscriptions.active_requests.entry(block).or_default(); active_requests_at.push(sender); @@ -117,15 +119,12 @@ impl SharedRequests for Arc = Arc>>>>>; -impl SharedRequests for SimpleSubscriptions where +impl SharedRequests for SimpleSubscriptions +where Hash: Send + Eq + std::hash::Hash, V: Send, { - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool { + fn listen_request(&self, block: Hash, sender: Sender>) -> bool { let mut subscriptions = self.lock(); let active_requests_at = subscriptions.entry(block).or_default(); active_requests_at.push(sender); @@ -138,9 +137,9 @@ impl SharedRequests for SimpleSubscriptions where } impl + 'static, Client> LightState - where - Block: BlockT, - Client: HeaderBackend + Send + Sync + 'static, +where + Block: BlockT, + Client: HeaderBackend + Send + Sync + 'static, { /// Create new state API backend for light nodes. pub fn new( @@ -170,10 +169,10 @@ impl + 'static, Client> LightState StateBackend for LightState - where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static +where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static, { fn call( &self, @@ -181,13 +180,17 @@ impl StateBackend for LightState FutureResult { - Box::new(call( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - method, - call_data, - ).boxed().compat()) + Box::new( + call( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + method, + call_data, + ) + .boxed() + .compat(), + ) } fn storage_keys( @@ -216,11 +219,7 @@ impl StateBackend for LightState, - _: StorageKey, - ) -> FutureResult> { + fn storage_size(&self, _: Option, _: StorageKey) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) } @@ -229,15 +228,21 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(storage( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - vec![key.0.clone()], - ).boxed().compat().map(move |mut values| values - .remove(&key) - .expect("successful request has entries for all requested keys; qed") - )) + Box::new( + storage( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + vec![key.0.clone()], + ) + .boxed() + .compat() + .map(move |mut values| { + values + .remove(&key) + .expect("successful request has entries for all requested keys; qed") + }), + ) } fn storage_hash( @@ -245,31 +250,38 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(StateBackend::storage(self, block, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) + Box::new(StateBackend::storage(self, block, key).and_then(|maybe_storage| { + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) + })) } fn metadata(&self, block: Option) -> FutureResult { - let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) - .and_then(|metadata| OpaqueMetadata::decode(&mut 
&metadata.0[..]) - .map(Into::into) - .map_err(|decode_err| client_err(ClientError::CallResultDecode( - "Unable to decode metadata", - decode_err, - )))); + let metadata = + self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) + .and_then(|metadata| { + OpaqueMetadata::decode(&mut &metadata.0[..]).map(Into::into).map_err( + |decode_err| { + client_err(ClientError::CallResultDecode( + "Unable to decode metadata", + decode_err, + )) + }, + ) + }); Box::new(metadata) } fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(runtime_version( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - ).boxed().compat()) + Box::new( + runtime_version( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + ) + .boxed() + .compat(), + ) } fn query_storage( @@ -284,7 +296,7 @@ impl StateBackend for LightState, - _at: Option + _at: Option, ) -> FutureResult>> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) } @@ -301,14 +313,14 @@ impl StateBackend for LightState>, - keys: Option> + keys: Option>, ) { let keys = match keys { Some(keys) if !keys.is_empty() => keys, _ => { warn!("Cannot subscribe to all keys on light client. Subscription rejected."); - return; - } + return + }, }; let keys = keys.iter().cloned().collect::>(); @@ -326,12 +338,10 @@ impl StateBackend for LightState(notification.hash)) .compat(), - display_error(storage( - &*remote_blockchain, - fetcher.clone(), - initial_block, - initial_keys, - ).map(move |r| r.map(|r| (initial_block, r)))), + display_error( + storage(&*remote_blockchain, fetcher.clone(), initial_block, initial_keys) + .map(move |r| r.map(|r| (initial_block, r))), + ), move |block| { // there'll be single request per block for all active subscriptions // with all subscribed keys @@ -342,12 +352,7 @@ impl StateBackend for LightState StateBackend for LightState None, } - } + }, ); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) .send_all(changes_stream.map(|changes| Ok(changes))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) @@ -382,7 +386,9 @@ impl StateBackend for LightState StateBackend for LightState RpcResult { if !self.subscriptions.cancel(id.clone()) { - return Ok(false); + return Ok(false) } // forget subscription keys @@ -406,14 +412,16 @@ impl StateBackend for LightState unreachable!("every key from keys_by_subscription has\ - corresponding entry in subscriptions_by_key; qed"), + Entry::Vacant(_) => unreachable!( + "every key from keys_by_subscription has\ + corresponding entry in subscriptions_by_key; qed" + ), Entry::Occupied(mut entry) => { entry.get_mut().remove(&id); if entry.get().is_empty() { entry.remove(); } - } + }, } } @@ -437,16 +445,11 @@ impl StateBackend for LightState(notification.hash)) .compat(), - display_error(runtime_version( - &*remote_blockchain, - fetcher.clone(), - initial_block, - ).map(move |r| r.map(|r| (initial_block, r)))), - move |block| runtime_version( - &*remote_blockchain, - fetcher.clone(), - block, + display_error( + runtime_version(&*remote_blockchain, fetcher.clone(), initial_block) + .map(move |r| r.map(|r| (initial_block, r))), ), + move |block| runtime_version(&*remote_blockchain, fetcher.clone(), block), |_, old_version, new_version| { let version_differs = old_version .as_ref() @@ -456,11 +459,10 @@ impl StateBackend for LightState Some(new_version.clone()), false => 
None, } - } + }, ); - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) .send_all(versions_stream.map(|version| Ok(version))) // we ignore the resulting Stream (if the first stream is over we are unsubscribed) .map(|_| ()) @@ -486,10 +488,10 @@ impl StateBackend for LightState ChildStateBackend for LightState - where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static +where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static, { fn read_child_proof( &self, @@ -528,23 +530,34 @@ impl ChildStateBackend for LightState FutureResult> { let block = self.block_or_best(block); let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), + let child_storage = + resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { + match result { + Ok(header) => Either::Left( + fetcher + .remote_read_child(RemoteReadChildRequest { + block, + header, + storage_key, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }) + .then(move |result| { + ready( + result + .map(|mut data| { + data.remove(&key.0) + .expect( + "successful result has entry for all keys; qed", + ) + .map(StorageData) + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + } }); Box::new(child_storage.boxed().compat()) @@ -556,11 +569,11 @@ impl ChildStateBackend for LightState FutureResult> { - Box::new(ChildStateBackend::storage(self, block, storage_key, key) - .and_then(|maybe_storage| + Box::new(ChildStateBackend::storage(self, block, storage_key, key).and_then( + |maybe_storage| { result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) + }, + )) } } @@ -570,17 +583,17 @@ fn resolve_header>( fetcher: &F, block: Block::Hash, ) -> impl std::future::Future> { - let maybe_header = future_header( - remote_blockchain, - fetcher, - BlockId::Hash(block), - ); - - maybe_header.then(move |result| - ready(result.and_then(|maybe_header| - maybe_header.ok_or_else(|| ClientError::UnknownBlock(format!("{}", block))) - ).map_err(client_err)), - ) + let maybe_header = future_header(remote_blockchain, fetcher, BlockId::Hash(block)); + + maybe_header.then(move |result| { + ready( + result + .and_then(|maybe_header| { + maybe_header.ok_or_else(|| ClientError::UnknownBlock(format!("{}", block))) + }) + .map_err(client_err), + ) + }) } /// Call runtime method at given block @@ -591,17 +604,20 @@ fn call>( method: String, call_data: Bytes, ) -> impl std::future::Future> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_call(RemoteCallRequest { - block, - header, - method, - call_data: call_data.0, - retry_count: Default::default(), - }).then(|result| ready(result.map(Bytes).map_err(client_err)))), - Err(error) => Either::Right(ready(Err(error))), - }) + 
resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_call(RemoteCallRequest { + block, + header, + method, + call_data: call_data.0, + retry_count: Default::default(), + }) + .then(|result| ready(result.map(Bytes).map_err(client_err))), + ), + Err(error) => Either::Right(ready(Err(error))), + }) } /// Get runtime version at given block. @@ -610,17 +626,14 @@ fn runtime_version>( fetcher: Arc, block: Block::Hash, ) -> impl std::future::Future> { - call( - remote_blockchain, - fetcher, - block, - "Core_version".into(), - Bytes(Vec::new()), + call(remote_blockchain, fetcher, block, "Core_version".into(), Bytes(Vec::new())).then( + |version| { + ready(version.and_then(|version| { + Decode::decode(&mut &version.0[..]) + .map_err(|e| client_err(ClientError::VersionInvalid(e.to_string()))) + })) + }, ) - .then(|version| ready(version.and_then(|version| - Decode::decode(&mut &version.0[..]) - .map_err(|e| client_err(ClientError::VersionInvalid(e.to_string()))) - ))) } /// Get storage value at given key at given block. @@ -630,22 +643,30 @@ fn storage>( block: Block::Hash, keys: Vec>, ) -> impl std::future::Future>, Error>> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read(RemoteReadRequest { - block, - header, - keys, - retry_count: Default::default(), - }).then(|result| ready(result - .map(|result| result - .into_iter() - .map(|(key, value)| (StorageKey(key), value.map(StorageData))) - .collect() - ).map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }) + resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_read(RemoteReadRequest { + block, + header, + keys, + retry_count: Default::default(), + }) + .then(|result| { + ready( + result + .map(|result| { + result + .into_iter() + .map(|(key, value)| (StorageKey(key), value.map(StorageData))) + .collect() + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + }) } /// Returns subscription stream that issues request on every imported block and @@ -654,9 +675,11 @@ fn subscription_stream< Block, Requests, FutureBlocksStream, - V, N, + V, + N, InitialRequestFuture, - IssueRequest, IssueRequestFuture, + IssueRequest, + IssueRequestFuture, CompareValues, >( shared_requests: Requests, @@ -664,12 +687,14 @@ fn subscription_stream< initial_request: InitialRequestFuture, issue_request: IssueRequest, compare_values: CompareValues, -) -> impl Stream where +) -> impl Stream +where Block: BlockT, Requests: 'static + SharedRequests, - FutureBlocksStream: Stream, + FutureBlocksStream: Stream, V: Send + 'static + Clone, - InitialRequestFuture: std::future::Future> + Send + 'static, + InitialRequestFuture: + std::future::Future> + Send + 'static, IssueRequest: 'static + Fn(Block::Hash) -> IssueRequestFuture, IssueRequestFuture: std::future::Future> + Send + 'static, CompareValues: Fn(Block::Hash, Option<&V>, &V) -> Option, @@ -678,33 +703,39 @@ fn subscription_stream< let previous_value = Arc::new(Mutex::new(None)); // prepare 'stream' of initial values - let initial_value_stream = ignore_error(initial_request) - .boxed() - .compat() - .into_stream(); + let initial_value_stream = ignore_error(initial_request).boxed().compat().into_stream(); // prepare stream of future values // // we do not want to stop stream if single request fails // (the 
warning should have been already issued by the request issuer)
-	let future_values_stream = future_blocks_stream
-		.and_then(move |block| ignore_error(maybe_share_remote_request::<Block, _, _, _, _>(
-			shared_requests.clone(),
-			block,
-			&issue_request,
-		).map(move |r| r.map(|v| (block, v)))).boxed().compat());
+	let future_values_stream = future_blocks_stream.and_then(move |block| {
+		ignore_error(
+			maybe_share_remote_request::<Block, _, _, _, _>(
+				shared_requests.clone(),
+				block,
+				&issue_request,
+			)
+			.map(move |r| r.map(|v| (block, v))),
+		)
+		.boxed()
+		.compat()
+	});
 
 	// now let's return changed values for selected blocks
 	initial_value_stream
 		.chain(future_values_stream)
-		.filter_map(move |block_and_new_value| block_and_new_value.and_then(|(block, new_value)| {
-			let mut previous_value = previous_value.lock();
-			compare_values(block, previous_value.as_ref(), &new_value)
-				.map(|notification_value| {
-					*previous_value = Some(new_value);
-					notification_value
-				})
-		}))
+		.filter_map(move |block_and_new_value| {
+			block_and_new_value.and_then(|(block, new_value)| {
+				let mut previous_value = previous_value.lock();
+				compare_values(block, previous_value.as_ref(), &new_value).map(
+					|notification_value| {
+						*previous_value = Some(new_value);
+						notification_value
+					},
+				)
+			})
+		})
 		.map_err(|_| ())
 }
 
@@ -714,7 +745,8 @@ fn maybe_share_remote_request<Block: BlockT, Requests, V, IssueRequest, IssueRequestFuture>(
 	shared_requests: Requests,
 	block: Block::Hash,
 	issue_request: &IssueRequest,
-) -> impl std::future::Future<Output = Result<V, ()>> where
+) -> impl std::future::Future<Output = Result<V, ()>>
+where
 	V: Clone,
 	Requests: SharedRequests<Block::Hash, V>,
 	IssueRequest: Fn(Block::Hash) -> IssueRequestFuture,
@@ -725,55 +757,58 @@ fn maybe_share_remote_request<Block: BlockT, Requests, V, IssueRequest, IssueRequestFuture>(
 
 /// Convert successful future result into Ok(result) and error into Err(()),
 /// displaying warning.
-fn display_error<F, T>(future: F) -> impl std::future::Future<Output = Result<T, ()>> where
-	F: std::future::Future<Output = Result<T, Error>>
+fn display_error<F, T>(future: F) -> impl std::future::Future<Output = Result<T, ()>>
+where
+	F: std::future::Future<Output = Result<T, Error>>,
 {
-	future.then(|result| ready(result.or_else(|err| {
+	future.then(|result| {
+		ready(result.or_else(|err| {
 			warn!("Remote request for subscription data has failed with: {:?}", err);
 			Err(())
-	})))
+		}))
+	})
 }
 
 /// Convert successful future result into Ok(Some(result)) and error into Ok(None),
 /// displaying warning.
-fn ignore_error(future: F) -> impl std::future::Future, ()>> where - F: std::future::Future> +fn ignore_error(future: F) -> impl std::future::Future, ()>> +where + F: std::future::Future>, { - future.then(|result| ready(match result { - Ok(result) => Ok(Some(result)), - Err(()) => Ok(None), - })) + future.then(|result| { + ready(match result { + Ok(result) => Ok(Some(result)), + Err(()) => Ok(None), + }) + }) } #[cfg(test)] mod tests { + use super::*; use rpc::futures::stream::futures_ordered; - use substrate_test_runtime_client::runtime::Block; use sp_core::H256; - use super::*; + use substrate_test_runtime_client::runtime::Block; #[test] fn subscription_stream_works() { @@ -789,13 +824,10 @@ mod tests { |_, old_value, new_value| match old_value == Some(new_value) { true => None, false => Some(new_value.clone()), - } + }, ); - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); + assert_eq!(stream.collect().wait(), Ok(vec![100, 200])); } #[test] @@ -812,13 +844,10 @@ mod tests { |_, old_value, new_value| match old_value == Some(new_value) { true => None, false => Some(new_value.clone()), - } + }, ); - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); + assert_eq!(stream.collect().wait(), Ok(vec![100, 200])); } #[test] @@ -828,10 +857,7 @@ mod tests { let shared_requests = SimpleSubscriptions::default(); // let's 'issue' requests for B1 - shared_requests.lock().insert( - H256::from([1; 32]), - vec![channel().0], - ); + shared_requests.lock().insert(H256::from([1; 32]), vec![channel().0]); // make sure that no additional requests are issued when we're asking for B1 let _ = maybe_share_remote_request::( diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index c9cb0bde89c1ac65e7b8a8aa60abbc05cda944a7..2a73ae31f357d902674c48ec6474975a32ab6143 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -16,26 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use super::*; -use super::state_full::split_range; use self::error::Error; +use super::{state_full::split_range, *}; -use std::sync::Arc; +use crate::testing::TaskExecutor; use assert_matches::assert_matches; +use futures::{compat::Future01CompatExt, executor}; use futures01::stream::Stream; -use sp_core::{storage::ChildInfo, ChangesTrieConfiguration}; -use sp_core::hash::H256; use sc_block_builder::BlockBuilderProvider; -use sp_io::hashing::blake2_256; -use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - runtime, -}; use sc_rpc_api::DenyUnsafe; +use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration}; +use sp_io::hashing::blake2_256; use sp_runtime::generic::BlockId; -use crate::testing::TaskExecutor; -use futures::{executor, compat::Future01CompatExt}; +use std::sync::Arc; +use substrate_test_runtime_client::{prelude::*, runtime, sp_consensus::BlockOrigin}; const STORAGE_KEY: &[u8] = b"child"; @@ -68,12 +62,18 @@ fn should_return_storage() { let key = StorageKey(KEY.to_vec()); assert_eq!( - client.storage(key.clone(), Some(genesis_hash).into()).wait() - .map(|x| x.map(|x| x.0.len())).unwrap().unwrap() as usize, + client + .storage(key.clone(), Some(genesis_hash).into()) + .wait() + .map(|x| x.map(|x| x.0.len())) + .unwrap() + .unwrap() as usize, VALUE.len(), ); assert_matches!( - client.storage_hash(key.clone(), Some(genesis_hash).into()).wait() + client + .storage_hash(key.clone(), Some(genesis_hash).into()) + .wait() .map(|x| x.is_some()), Ok(true) ); @@ -87,10 +87,13 @@ fn should_return_storage() { ); assert_eq!( executor::block_on( - child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) + child + .storage(prefixed_storage_key(), key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) .compat(), - ).unwrap().unwrap() as usize, + ) + .unwrap() + .unwrap() as usize, CHILD_VALUE.len(), ); } @@ -98,20 +101,17 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { let child_info = ChildInfo::new_default(STORAGE_KEY); - let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage(&child_info, "key", vec![42_u8]) - .build()); - let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full( - client, - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, + let client = Arc::new( + substrate_test_runtime_client::TestClientBuilder::new() + .add_child_storage(&child_info, "key", vec![42_u8]) + .build(), ); + let genesis_hash = client.genesis_hash(); + let (_client, child) = + new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); - assert_matches!( child.storage( child_key.clone(), @@ -121,36 +121,26 @@ fn should_return_child_storage() { Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - child.storage_hash( - child_key.clone(), - key.clone(), - Some(genesis_hash).into(), - ).wait().map(|x| x.is_some()), + child + .storage_hash(child_key.clone(), key.clone(), Some(genesis_hash).into(),) + .wait() + .map(|x| x.is_some()), Ok(true) ); - assert_matches!( - child.storage_size( - child_key.clone(), - key.clone(), - None, - ).wait(), - Ok(Some(1)) - ); + assert_matches!(child.storage_size(child_key.clone(), key.clone(), None,).wait(), Ok(Some(1))); } #[test] fn should_call_contract() { let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let 
(client, _child) = new_full( - client, - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (client, _child) = + new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); assert_matches!( - client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), + client + .call("balanceOf".into(), Bytes(vec![1, 2, 3]), Some(genesis_hash).into()) + .wait(), Err(Error::Client(_)) ) } @@ -171,18 +161,17 @@ fn should_notify_about_storage_changes() { api.subscribe_storage(Default::default(), subscriber, None.into()); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } @@ -207,25 +196,27 @@ fn should_send_initial_storage_changes_and_notifications() { None, ); - let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); + let alice_balance_key = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); - api.subscribe_storage(Default::default(), subscriber, Some(vec![ - StorageKey(alice_balance_key.to_vec()), - ]).into()); + api.subscribe_storage( + Default::default(), + subscriber, + Some(vec![StorageKey(alice_balance_key.to_vec())]).into(), + ); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } @@ -257,9 +248,13 @@ fn should_query_storage() { // fake change: None -> Some(value) -> Some(value) builder.push_storage_change(vec![2], Some(vec![2])).unwrap(); // actual change: None -> Some(value) -> None - builder.push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }).unwrap(); + builder + .push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }) + .unwrap(); // actual change: None -> Some(value) - builder.push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }).unwrap(); + builder + .push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }) + .unwrap(); // actual change: Some(value1) -> Some(value2) builder.push_storage_change(vec![5], Some(vec![nonce as u8])).unwrap(); let block = builder.build().unwrap().block; @@ -301,20 +296,12 @@ fn should_query_storage() { // Query changes only up to block1 let keys = (1..6).map(|k| StorageKey(vec![k])).collect::>(); - let result = 
api.query_storage( - keys.clone(), - genesis_hash, - Some(block1_hash).into(), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(block1_hash).into()); assert_eq!(result.wait().unwrap(), expected); // Query all changes - let result = api.query_storage( - keys.clone(), - genesis_hash, - None.into(), - ); + let result = api.query_storage(keys.clone(), genesis_hash, None.into()); expected.push(StorageChangeSet { block: block2_hash, @@ -327,20 +314,12 @@ fn should_query_storage() { assert_eq!(result.wait().unwrap(), expected); // Query changes up to block2. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(block2_hash), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(block2_hash)); assert_eq!(result.wait().unwrap(), expected); // Inverted range. - let result = api.query_storage( - keys.clone(), - block1_hash, - Some(genesis_hash), - ); + let result = api.query_storage(keys.clone(), block1_hash, Some(genesis_hash)); assert_eq!( result.wait().map_err(|e| e.to_string()), @@ -348,18 +327,15 @@ fn should_query_storage() { from: format!("1 ({:?})", block1_hash), to: format!("0 ({:?})", genesis_hash), details: "from number > to number".to_owned(), - }).map_err(|e| e.to_string()) + }) + .map_err(|e| e.to_string()) ); let random_hash1 = H256::random(); let random_hash2 = H256::random(); // Invalid second hash. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(random_hash1), - ); + let result = api.query_storage(keys.clone(), genesis_hash, Some(random_hash1)); assert_eq!( result.wait().map_err(|e| e.to_string()), @@ -367,15 +343,12 @@ fn should_query_storage() { from: format!("{:?}", genesis_hash), to: format!("{:?}", Some(random_hash1)), details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()) + }) + .map_err(|e| e.to_string()) ); // Invalid first hash with Some other hash. - let result = api.query_storage( - keys.clone(), - random_hash1, - Some(genesis_hash), - ); + let result = api.query_storage(keys.clone(), random_hash1, Some(genesis_hash)); assert_eq!( result.wait().map_err(|e| e.to_string()), @@ -383,15 +356,12 @@ fn should_query_storage() { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(genesis_hash)), details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + }) + .map_err(|e| e.to_string()), ); // Invalid first hash with None. - let result = api.query_storage( - keys.clone(), - random_hash1, - None, - ); + let result = api.query_storage(keys.clone(), random_hash1, None); assert_eq!( result.wait().map_err(|e| e.to_string()), @@ -399,15 +369,12 @@ fn should_query_storage() { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(block2_hash)), // Best block hash. details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + }) + .map_err(|e| e.to_string()), ); // Both hashes invalid. - let result = api.query_storage( - keys.clone(), - random_hash1, - Some(random_hash2), - ); + let result = api.query_storage(keys.clone(), random_hash1, Some(random_hash2)); assert_eq!( result.wait().map_err(|e| e.to_string()), @@ -415,29 +382,25 @@ fn should_query_storage() { from: format!("{:?}", random_hash1), // First hash not found. 
to: format!("{:?}", Some(random_hash2)), details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), + }) + .map_err(|e| e.to_string()), ); // single block range - let result = api.query_storage_at( - keys.clone(), - Some(block1_hash), - ); + let result = api.query_storage_at(keys.clone(), Some(block1_hash)); assert_eq!( result.wait().unwrap(), - vec![ - StorageChangeSet { - block: block1_hash, - changes: vec![ - (StorageKey(vec![1_u8]), None), - (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), - (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), - (StorageKey(vec![4_u8]), None), - (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), - ] - } - ] + vec![StorageChangeSet { + block: block1_hash, + changes: vec![ + (StorageKey(vec![1_u8]), None), + (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), + (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), + (StorageKey(vec![4_u8]), None), + (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), + ] + }] ); } @@ -461,7 +424,6 @@ fn should_split_ranges() { assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); } - #[test] fn should_return_runtime_version() { let client = Arc::new(substrate_test_runtime_client::new()); @@ -503,17 +465,13 @@ fn should_notify_on_runtime_version_initially() { api.subscribe_runtime_version(Default::default(), subscriber); // assert id assigned - assert!(matches!( - executor::block_on(id.compat()), - Ok(Ok(SubscriptionId::String(_))) - )); - + assert!(matches!(executor::block_on(id.compat()), Ok(Ok(SubscriptionId::String(_))))); } // assert initial version sent. let (notification, next) = executor::block_on(transport.into_future().compat()).unwrap(); assert!(notification.is_some()); - // no more notifications on this channel + // no more notifications on this channel assert_eq!(executor::block_on(next.into_future().compat()).unwrap().0, None); } diff --git a/substrate/client/rpc/src/system/mod.rs b/substrate/client/rpc/src/system/mod.rs index d405755731ccbc5c8d6db49cecf70c71547ed55c..08258640ad7a090f19b89443f4418d72dd985a99 100644 --- a/substrate/client/rpc/src/system/mod.rs +++ b/substrate/client/rpc/src/system/mod.rs @@ -21,24 +21,25 @@ #[cfg(test)] mod tests; -use futures::{future::BoxFuture, FutureExt, TryFutureExt}; -use futures::{channel::oneshot, compat::Compat}; +use futures::{channel::oneshot, compat::Compat, future::BoxFuture, FutureExt, TryFutureExt}; use sc_rpc_api::{DenyUnsafe, Receiver}; use sc_tracing::logging; -use sp_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; +use sp_utils::mpsc::TracingUnboundedSender; use self::error::Result; +pub use self::{ + gen_client::Client as SystemClient, + helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, +}; pub use sc_rpc_api::system::*; -pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole, SyncState}; -pub use self::gen_client::Client as SystemClient; /// Early exit for RPCs that require `--rpc-methods=Unsafe` to be enabled macro_rules! 
bail_if_unsafe { ($value: expr) => { if let Err(err) = $value.check_if_safe() { - return async move { Err(err.into()) }.boxed().compat(); + return async move { Err(err.into()) }.boxed().compat() } }; } @@ -85,11 +86,7 @@ impl System { send_back: TracingUnboundedSender>, deny_unsafe: DenyUnsafe, ) -> Self { - System { - info, - send_back, - deny_unsafe, - } + System { info, send_back, deny_unsafe } } } @@ -132,35 +129,36 @@ impl SystemApi::Number> for Sy Receiver(Compat::new(rx)) } - fn system_peers(&self) - -> Compat::Number>>>>> - { + fn system_peers( + &self, + ) -> Compat< + BoxFuture<'static, rpc::Result::Number>>>>, + > { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Peers(tx)); - async move { - rx.await.map_err(|_| rpc::Error::internal_error()) - }.boxed().compat() + async move { rx.await.map_err(|_| rpc::Error::internal_error()) } + .boxed() + .compat() } - fn system_network_state(&self) - -> Compat>> - { + fn system_network_state(&self) -> Compat>> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); - async move { - rx.await.map_err(|_| rpc::Error::internal_error()) - }.boxed().compat() + async move { rx.await.map_err(|_| rpc::Error::internal_error()) } + .boxed() + .compat() } - fn system_add_reserved_peer(&self, peer: String) - -> Compat>> - { + fn system_add_reserved_peer( + &self, + peer: String, + ) -> Compat>> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); @@ -171,12 +169,15 @@ impl SystemApi::Number> for Sy Ok(Err(e)) => Err(rpc::Error::from(e)), Err(_) => Err(rpc::Error::internal_error()), } - }.boxed().compat() + } + .boxed() + .compat() } - fn system_remove_reserved_peer(&self, peer: String) - -> Compat>> - { + fn system_remove_reserved_peer( + &self, + peer: String, + ) -> Compat>> { bail_if_unsafe!(self.deny_unsafe); let (tx, rx) = oneshot::channel(); @@ -187,7 +188,9 @@ impl SystemApi::Number> for Sy Ok(Err(e)) => Err(rpc::Error::from(e)), Err(_) => Err(rpc::Error::internal_error()), } - }.boxed().compat() + } + .boxed() + .compat() } fn system_reserved_peers(&self) -> Receiver> { @@ -214,7 +217,7 @@ impl SystemApi::Number> for Sy logging::reload_filter().map_err(|_e| rpc::Error::internal_error()) } - fn system_reset_log_filter(&self)-> std::result::Result<(), rpc::Error> { + fn system_reset_log_filter(&self) -> std::result::Result<(), rpc::Error> { self.deny_unsafe.check_if_safe()?; logging::reset_log_filter().map_err(|_e| rpc::Error::internal_error()) } diff --git a/substrate/client/rpc/src/system/tests.rs b/substrate/client/rpc/src/system/tests.rs index 6e22004cd65f70377feee2f8e2abb9242cf340d8..906bd60229d1f1f15e10a74cd50a15eb21c92097 100644 --- a/substrate/client/rpc/src/system/tests.rs +++ b/substrate/client/rpc/src/system/tests.rs @@ -18,13 +18,17 @@ use super::*; -use sc_network::{self, PeerId}; -use sc_network::config::Role; -use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; use futures::prelude::*; +use sc_network::{self, config::Role, PeerId}; use sp_utils::mpsc::tracing_unbounded; -use std::{process::{Stdio, Command}, env, io::{BufReader, BufRead, Write}, thread}; +use std::{ + env, + io::{BufRead, BufReader, Write}, + process::{Command, Stdio}, + thread, +}; +use substrate_test_runtime_client::runtime::Block; struct Status { pub peers: usize, @@ -35,12 +39,7 @@ struct Status { impl Default for Status { fn default() -> Status { - Status { - 
peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: false, - } + Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: false } } } @@ -59,7 +58,8 @@ fn api>>(sync: T) -> System { }); }, Request::LocalPeerId(sender) => { - let _ = sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); + let _ = + sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); }, Request::LocalListenAddresses(sender) => { let _ = sender.send(vec![ @@ -78,42 +78,48 @@ fn api>>(sync: T) -> System { }); } let _ = sender.send(peers); - } + }, Request::NetworkState(sender) => { - let _ = sender.send(serde_json::to_value(&sc_network::network_state::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - peerset: serde_json::Value::Null, - }).unwrap()); + let _ = sender.send( + serde_json::to_value(&sc_network::network_state::NetworkState { + peer_id: String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + peerset: serde_json::Value::Null, + }) + .unwrap(), + ); }, Request::NetworkAddReservedPeer(peer, sender) => { let _ = match sc_network::config::parse_str_addr(&peer) { Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; }, Request::NetworkRemoveReservedPeer(peer, sender) => { let _ = match peer.parse::() { Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), + Err(s) => + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), }; - } + }, Request::NetworkReservedPeers(sender) => { - let _ = sender.send(vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()]); - } + let _ = sender + .send(vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()]); + }, Request::NodeRoles(sender) => { let _ = sender.send(vec![NodeRole::Authority]); - } + }, Request::SyncState(sender) => { let _ = sender.send(SyncState { starting_block: 1, current_block: 2, highest_block: Some(3), }); - } + }, }; future::ready(()) @@ -128,7 +134,7 @@ fn api>>(sync: T) -> System { chain_type: Default::default(), }, tx, - sc_rpc_api::DenyUnsafe::No + sc_rpc_api::DenyUnsafe::No, ) } @@ -139,95 +145,58 @@ fn wait_receiver(rx: Receiver) -> T { #[test] fn system_name_works() { - assert_eq!( - api(None).system_name().unwrap(), - "testclient".to_owned(), - ); + assert_eq!(api(None).system_name().unwrap(), "testclient".to_owned(),); } #[test] fn system_version_works() { - assert_eq!( - api(None).system_version().unwrap(), - "0.2.0".to_owned(), - ); + assert_eq!(api(None).system_version().unwrap(), "0.2.0".to_owned(),); } #[test] fn system_chain_works() { - assert_eq!( - api(None).system_chain().unwrap(), - "testchain".to_owned(), - ); + assert_eq!(api(None).system_chain().unwrap(), "testchain".to_owned(),); } #[test] fn system_properties_works() { - assert_eq!( - api(None).system_properties().unwrap(), - serde_json::map::Map::new(), - ); + assert_eq!(api(None).system_properties().unwrap(), serde_json::map::Map::new(),); } #[test] fn system_type_works() { - assert_eq!( - api(None).system_type().unwrap(), - Default::default(), - ); + assert_eq!(api(None).system_type().unwrap(), 
Default::default(),); } #[test] fn system_health() { assert_matches!( wait_receiver(api(None).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: true, - } + Health { peers: 0, is_syncing: false, should_have_peers: true } ); assert_matches!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: true, - is_dev: true, - }).system_health()), - Health { - peers: 5, - is_syncing: true, - should_have_peers: false, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: true, is_dev: true }) + .system_health() + ), + Health { peers: 5, is_syncing: true, should_have_peers: false } ); assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: false, - is_dev: false, - }).system_health()), - Health { - peers: 5, - is_syncing: false, - should_have_peers: true, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: false, is_dev: false }) + .system_health() + ), + Health { peers: 5, is_syncing: false, should_have_peers: true } ); assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: true, - }).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: false, - } + wait_receiver( + api(Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: true }) + .system_health() + ), + Health { peers: 0, is_syncing: false, should_have_peers: false } ); } @@ -244,8 +213,10 @@ fn system_local_listen_addresses_works() { assert_eq!( wait_receiver(api(None).system_local_listen_addresses()), vec![ - "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), - "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), + "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), ] ); } @@ -255,12 +226,8 @@ fn system_peers() { let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); let peer_id = PeerId::random(); - let req = api(Status { - peer_id: peer_id.clone(), - peers: 1, - is_syncing: false, - is_dev: true, - }).system_peers(); + let req = api(Status { peer_id: peer_id.clone(), peers: 1, is_syncing: false, is_dev: true }) + .system_peers(); let res = runtime.block_on(req).unwrap(); assert_eq!( @@ -295,27 +262,21 @@ fn system_network_state() { #[test] fn system_node_roles() { - assert_eq!( - wait_receiver(api(None).system_node_roles()), - vec![NodeRole::Authority] - ); + assert_eq!(wait_receiver(api(None).system_node_roles()), vec![NodeRole::Authority]); } #[test] fn system_sync_state() { assert_eq!( wait_receiver(api(None).system_sync_state()), - SyncState { - starting_block: 1, - current_block: 2, - highest_block: Some(3), - } + SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } ); } #[test] fn system_network_add_reserved() { - let good_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let good_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; let bad_peer_id = "/ip4/198.51.100.19/tcp/30333"; let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); @@ -328,7 +289,8 @@ fn system_network_add_reserved() { #[test] fn system_network_remove_reserved() { let good_peer_id = 
"QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let bad_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); let good_fut = api(None).system_remove_reserved_peer(good_peer_id.into()); @@ -357,15 +319,17 @@ fn test_add_reset_log_filter() { for line in std::io::stdin().lock().lines() { let line = line.expect("Failed to read bytes"); if line.contains("add_reload") { - api(None).system_add_log_filter("test_after_add".into()) + api(None) + .system_add_log_filter("test_after_add".into()) .expect("`system_add_log_filter` failed"); } else if line.contains("add_trace") { - api(None).system_add_log_filter("test_before_add=trace".into()) + api(None) + .system_add_log_filter("test_before_add=trace".into()) .expect("`system_add_log_filter` failed"); } else if line.contains("reset") { api(None).system_reset_log_filter().expect("`system_reset_log_filter` failed"); } else if line.contains("exit") { - return; + return } log::trace!(target: "test_before_add", "{}", EXPECTED_WITH_TRACE); log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD); diff --git a/substrate/client/rpc/src/testing.rs b/substrate/client/rpc/src/testing.rs index b69cc7d4b194036d945930395d35f85ccd0a00a2..e6b30ecdb42b47e513466d6e8d0fdbe156d66852 100644 --- a/substrate/client/rpc/src/testing.rs +++ b/substrate/client/rpc/src/testing.rs @@ -18,8 +18,8 @@ //! Testing utils used by the RPC tests. +use futures::{compat::Future01CompatExt, executor, FutureExt}; use rpc::futures::future as future01; -use futures::{executor, compat::Future01CompatExt, FutureExt}; // Executor shared by all tests. // @@ -38,7 +38,7 @@ impl future01::Executor for TaskExecutor { fn execute( &self, future: Boxed01Future01, - ) -> std::result::Result<(), future01::ExecuteError>{ + ) -> std::result::Result<(), future01::ExecuteError> { EXECUTOR.spawn_ok(future.compat().map(drop)); Ok(()) } diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs index 46590ce8e8c6cbaac1b5097adf55df3b1a7f62d1..2885fb6deb54c269681de107cb2fdb29a64d0028 100644 --- a/substrate/client/service/src/builder.rs +++ b/substrate/client/service/src/builder.rs @@ -17,65 +17,52 @@ // along with this program. If not, see . 
use crate::{ - error::Error, MallocSizeOfWasm, RpcHandlers, - start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, - metrics::MetricsService, + build_network_future, client::{light, Client, ClientConfig}, config::{Configuration, KeystoreConfig, PrometheusConfig, TransactionStorageMode}, + error::Error, + metrics::MetricsService, + start_rpc_servers, MallocSizeOfWasm, RpcHandlers, SpawnTaskHandle, TaskManager, + TransactionPoolAdapter, }; -use sc_client_api::{ - light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider, -}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sc_chain_spec::get_extension; -use sp_consensus::{ - block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, - import_queue::ImportQueue, -}; +use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; use jsonrpc_pubsub::manager::SubscriptionManager; -use futures::{ - FutureExt, StreamExt, - future::ready, - channel::oneshot, -}; -use sc_keystore::LocalKeystore; use log::info; -use sc_network::config::{Role, OnDemand, SyncMode}; -use sc_network::NetworkService; -use sc_network::block_request_handler::{self, BlockRequestHandler}; -use sc_network::state_request_handler::{self, StateRequestHandler}; -use sc_network::light_client_requests::{self, handler::LightClientRequestHandler}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, HashFor, Zero, BlockIdTo, +use prometheus_endpoint::Registry; +use sc_chain_spec::get_extension; +use sc_client_api::{ + execution_extensions::ExecutionExtensions, light::RemoteBlockchain, + proof_provider::ProofProvider, BadBlocks, BlockBackend, BlockchainEvents, ExecutorProvider, + ForkBlocks, StorageProvider, UsageProvider, }; -use sp_api::{ProvideRuntimeApi, CallApiAt}; -use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; -use std::{sync::Arc, str::FromStr}; -use wasm_timer::SystemTime; -use sc_telemetry::{ - telemetry, - ConnectionMessage, - Telemetry, - TelemetryHandle, - SUBSTRATE_INFO, +use sc_client_db::{Backend, DatabaseSettings}; +use sc_executor::{NativeExecutionDispatch, NativeExecutor, RuntimeInfo}; +use sc_keystore::LocalKeystore; +use sc_network::{ + block_request_handler::{self, BlockRequestHandler}, + config::{OnDemand, Role, SyncMode}, + light_client_requests::{self, handler::LightClientRequestHandler}, + state_request_handler::{self, StateRequestHandler}, + NetworkService, }; +use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; -use prometheus_endpoint::Registry; -use sc_client_db::{Backend, DatabaseSettings}; -use sp_core::traits::{ - CodeExecutor, - SpawnNamed, +use sp_api::{CallApiAt, ProvideRuntimeApi}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator}, + import_queue::ImportQueue, }; +use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; -use sp_runtime::BuildStorage; -use sc_client_api::{ - BlockBackend, BlockchainEvents, - StorageProvider, - proof_provider::ProofProvider, - execution_extensions::ExecutionExtensions +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, BlockIdTo, HashFor, Zero}, + BuildStorage, }; -use sp_blockchain::{HeaderMetadata, HeaderBackend}; +use sp_utils::mpsc::{tracing_unbounded, 
TracingUnboundedSender}; +use std::{str::FromStr, sync::Arc}; +use wasm_timer::SystemTime; /// A utility trait for building an RPC extension given a `DenyUnsafe` instance. /// This is useful since at service definition time we don't know whether the @@ -95,7 +82,8 @@ pub trait RpcExtensionBuilder { ) -> Self::Output; } -impl RpcExtensionBuilder for F where +impl RpcExtensionBuilder for F +where F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> R, R: sc_rpc::RpcExtension, { @@ -115,7 +103,8 @@ impl RpcExtensionBuilder for F where /// `DenyUnsafe` instance and return a static `RpcExtension` instance. pub struct NoopRpcExtensionBuilder(pub R); -impl RpcExtensionBuilder for NoopRpcExtensionBuilder where +impl RpcExtensionBuilder for NoopRpcExtensionBuilder +where R: Clone + sc_rpc::RpcExtension, { type Output = R; @@ -129,7 +118,8 @@ impl RpcExtensionBuilder for NoopRpcExtensionBuilder where } } -impl From for NoopRpcExtensionBuilder where +impl From for NoopRpcExtensionBuilder +where R: sc_rpc::RpcExtension, { fn from(e: R) -> NoopRpcExtensionBuilder { @@ -137,58 +127,37 @@ impl From for NoopRpcExtensionBuilder where } } - /// Full client type. -pub type TFullClient = Client< - TFullBackend, - TFullCallExecutor, - TBl, - TRtApi, ->; +pub type TFullClient = + Client, TFullCallExecutor, TBl, TRtApi>; /// Full client backend type. pub type TFullBackend = sc_client_db::Backend; /// Full client call executor type. -pub type TFullCallExecutor = crate::client::LocalCallExecutor< - TBl, - sc_client_db::Backend, - NativeExecutor, ->; +pub type TFullCallExecutor = + crate::client::LocalCallExecutor, NativeExecutor>; /// Light client type. -pub type TLightClient = TLightClientWithBackend< - TBl, TRtApi, TExecDisp, TLightBackend ->; +pub type TLightClient = + TLightClientWithBackend>; /// Light client backend type. -pub type TLightBackend = sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor, ->; +pub type TLightBackend = + sc_light::Backend, HashFor>; /// Light call executor type. pub type TLightCallExecutor = sc_light::GenesisCallExecutor< - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor - >, + sc_light::Backend, HashFor>, crate::client::LocalCallExecutor< TBl, - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor - >, - NativeExecutor + sc_light::Backend, HashFor>, + NativeExecutor, >, >; -type TFullParts = ( - TFullClient, - Arc>, - KeystoreContainer, - TaskManager, -); +type TFullParts = + (TFullClient, Arc>, KeystoreContainer, TaskManager); type TLightParts = ( Arc>, @@ -199,10 +168,8 @@ type TLightParts = ( ); /// Light client backend type with a specific hash type. -pub type TLightBackendWithHash = sc_light::Backend< - sc_client_db::light::LightStorage, - THash, ->; +pub type TLightBackendWithHash = + sc_light::Backend, THash>; /// Light client type with a specific backend. 
pub type TLightClientWithBackend = Client< @@ -220,7 +187,10 @@ trait AsCryptoStoreRef { fn sync_keystore_ref(&self) -> Arc; } -impl AsCryptoStoreRef for Arc where T: CryptoStore + SyncCryptoStore + 'static { +impl AsCryptoStoreRef for Arc +where + T: CryptoStore + SyncCryptoStore + 'static, +{ fn keystore_ref(&self) -> Arc { self.clone() } @@ -239,14 +209,12 @@ impl KeystoreContainer { /// Construct KeystoreContainer pub fn new(config: &KeystoreConfig) -> Result { let keystore = Arc::new(match config { - KeystoreConfig::Path { path, password } => LocalKeystore::open( - path.clone(), - password.clone(), - )?, + KeystoreConfig::Path { path, password } => + LocalKeystore::open(path.clone(), password.clone())?, KeystoreConfig::InMemory => LocalKeystore::in_memory(), }); - Ok(Self{remote: Default::default(), local: keystore}) + Ok(Self { remote: Default::default(), local: keystore }) } /// Set the remote keystore. @@ -255,7 +223,8 @@ impl KeystoreContainer { /// does not reset any references previously handed out - they will /// stick around. pub fn set_remote_keystore(&mut self, remote: Arc) - where T: CryptoStore + SyncCryptoStore + 'static + where + T: CryptoStore + SyncCryptoStore + 'static, { self.remote = Some(Box::new(remote)) } @@ -295,7 +264,8 @@ impl KeystoreContainer { pub fn new_full_client( config: &Configuration, telemetry: Option, -) -> Result, Error> where +) -> Result, Error> +where TBl: BlockT, TExecDisp: NativeExecutionDispatch + 'static, TBl::Hash: FromStr, @@ -307,7 +277,8 @@ pub fn new_full_client( pub fn new_full_parts( config: &Configuration, telemetry: Option, -) -> Result, Error> where +) -> Result, Error> +where TBl: BlockT, TExecDisp: NativeExecutionDispatch + 'static, TBl::Hash: FromStr, @@ -337,15 +308,13 @@ pub fn new_full_parts( let (client, backend) = { let db_config = sc_client_db::DatabaseSettings { state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), keep_blocks: config.keep_blocks.clone(), transaction_storage: config.transaction_storage.clone(), }; - let backend = new_db_backend(db_config)?; let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( @@ -354,15 +323,20 @@ pub fn new_full_parts( sc_offchain::OffchainDb::factory_from_backend(&*backend), ); - let wasm_runtime_substitutes = config.chain_spec.code_substitutes().into_iter().map(|(h, c)| { - let hash = TBl::Hash::from_str(&h) - .map_err(|_| - Error::Application(Box::from( - format!("Failed to parse `{}` as block hash for code substitutes.", h) - )) - )?; - Ok((hash, c)) - }).collect::, Error>>()?; + let wasm_runtime_substitutes = config + .chain_spec + .code_substitutes() + .into_iter() + .map(|(h, c)| { + let hash = TBl::Hash::from_str(&h).map_err(|_| { + Error::Application(Box::from(format!( + "Failed to parse `{}` as block hash for code substitutes.", + h + ))) + })?; + Ok((hash, c)) + }) + .collect::, Error>>()?; let client = new_client( backend.clone(), @@ -375,10 +349,13 @@ pub fn new_full_parts( config.prometheus_config.as_ref().map(|config| config.registry.clone()), telemetry, ClientConfig { - offchain_worker_enabled : config.offchain_worker.enabled, + offchain_worker_enabled: config.offchain_worker.enabled, offchain_indexing_api: config.offchain_worker.indexing_enabled, wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), - no_genesis: 
matches!(config.network.sync_mode, sc_network::config::SyncMode::Fast {..}), + no_genesis: matches!( + config.network.sync_mode, + sc_network::config::SyncMode::Fast { .. } + ), wasm_runtime_substitutes, }, )?; @@ -386,19 +363,15 @@ pub fn new_full_parts( (client, backend) }; - Ok(( - client, - backend, - keystore_container, - task_manager, - )) + Ok((client, backend, keystore_container, task_manager)) } /// Create the initial parts of a light node. pub fn new_light_parts( config: &Configuration, telemetry: Option, -) -> Result, Error> where +) -> Result, Error> +where TBl: BlockT, TExecDisp: NativeExecutionDispatch + 'static, { @@ -417,8 +390,7 @@ pub fn new_light_parts( let db_storage = { let db_settings = sc_client_db::DatabaseSettings { state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), state_pruning: config.state_pruning.clone(), source: config.database.clone(), keep_blocks: config.keep_blocks.clone(), @@ -427,13 +399,11 @@ pub fn new_light_parts( sc_client_db::light::LightStorage::new(db_settings)? }; let light_blockchain = sc_light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - sc_light::new_fetch_checker::<_, TBl, _>( - light_blockchain.clone(), - executor.clone(), - Box::new(task_manager.spawn_handle()), - ), - ); + let fetch_checker = Arc::new(sc_light::new_fetch_checker::<_, TBl, _>( + light_blockchain.clone(), + executor.clone(), + Box::new(task_manager.spawn_handle()), + )); let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); let backend = sc_light::new_light_backend(light_blockchain); let client = Arc::new(light::new_light( @@ -451,7 +421,8 @@ pub fn new_light_parts( /// Create an instance of default DB-backend backend. 
pub fn new_db_backend( settings: DatabaseSettings, -) -> Result>, sp_blockchain::Error> where +) -> Result>, sp_blockchain::Error> +where Block: BlockT, { const CANONICALIZATION_DELAY: u64 = 4096; @@ -480,11 +451,16 @@ pub fn new_client( >, sp_blockchain::Error, > - where - Block: BlockT, - E: CodeExecutor + RuntimeInfo, +where + Block: BlockT, + E: CodeExecutor + RuntimeInfo, { - let executor = crate::client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone())?; + let executor = crate::client::LocalCallExecutor::new( + backend.clone(), + executor, + spawn_handle, + config.clone(), + )?; Ok(crate::client::Client::new( backend, executor, @@ -534,10 +510,10 @@ pub fn build_offchain_workers( client: Arc, network: Arc::Hash>>, ) -> Option>> - where - TBl: BlockT, - TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, - >::Api: sc_offchain::OffchainWorkerApi, +where + TBl: BlockT, + TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, + >::Api: sc_offchain::OffchainWorkerApi, { let offchain_workers = Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone()))); @@ -551,7 +527,7 @@ pub fn build_offchain_workers( offchain, Clone::clone(&spawn_handle), network.clone(), - ) + ), ); } @@ -562,22 +538,32 @@ pub fn build_offchain_workers( pub fn spawn_tasks( params: SpawnTasksParams, ) -> Result - where - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + - StorageProvider + CallApiAt + Send + 'static, - >::Api: - sp_api::Metadata + - sc_offchain::OffchainWorkerApi + - sp_transaction_pool::runtime_api::TaggedTransactionQueue + - sp_session::SessionKeys + - sp_api::ApiExt, - TBl: BlockT, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExPool: MaintainedTransactionPool::Hash> + - MallocSizeOfWasm + 'static, - TRpc: sc_rpc::RpcExtension +where + TCl: ProvideRuntimeApi + + HeaderMetadata + + Chain + + BlockBackend + + BlockIdTo + + ProofProvider + + HeaderBackend + + BlockchainEvents + + ExecutorProvider + + UsageProvider + + StorageProvider + + CallApiAt + + Send + + 'static, + >::Api: sp_api::Metadata + + sc_offchain::OffchainWorkerApi + + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_session::SessionKeys + + sp_api::ApiExt, + TBl: BlockT, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TExPool: MaintainedTransactionPool::Hash> + + MallocSizeOfWasm + + 'static, + TRpc: sc_rpc::RpcExtension, { let SpawnTasksParams { mut config, @@ -600,17 +586,11 @@ pub fn spawn_tasks( client.clone(), &BlockId::Hash(chain_info.best_hash), config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - ).map_err(|e| Error::Application(Box::new(e)))?; + ) + .map_err(|e| Error::Application(Box::new(e)))?; let telemetry = telemetry - .map(|telemetry| { - init_telemetry( - &mut config, - network.clone(), - client.clone(), - telemetry, - ) - }) + .map(|telemetry| init_telemetry(&mut config, network.clone(), client.clone(), telemetry)) .transpose()?; info!("📦 Highest known block at #{}", chain_info.best_number); @@ -625,63 +605,69 @@ pub fn spawn_tasks( spawn_handle.spawn( "on-transaction-imported", - transaction_notifications( - transaction_pool.clone(), - network.clone(), - telemetry.clone(), - ), + transaction_notifications(transaction_pool.clone(), network.clone(), telemetry.clone()), ); // Prometheus metrics. 
- let metrics_service = if let Some(PrometheusConfig { port, registry }) = - config.prometheus_config.clone() - { - // Set static metrics. - let metrics = MetricsService::with_prometheus(telemetry.clone(), ®istry, &config)?; - spawn_handle.spawn( - "prometheus-endpoint", - prometheus_endpoint::init_prometheus(port, registry).map(drop) - ); + let metrics_service = + if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + // Set static metrics. + let metrics = MetricsService::with_prometheus(telemetry.clone(), ®istry, &config)?; + spawn_handle.spawn( + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop), + ); - metrics - } else { - MetricsService::new(telemetry.clone()) - }; + metrics + } else { + MetricsService::new(telemetry.clone()) + }; // Periodically updated metrics and telemetry updates. - spawn_handle.spawn("telemetry-periodic-send", - metrics_service.run( - client.clone(), - transaction_pool.clone(), - network.clone(), - ) + spawn_handle.spawn( + "telemetry-periodic-send", + metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()), ); // RPC - let gen_handler = | - deny_unsafe: sc_rpc::DenyUnsafe, - rpc_middleware: sc_rpc_server::RpcMiddleware - | gen_handler( - deny_unsafe, rpc_middleware, &config, task_manager.spawn_handle(), - client.clone(), transaction_pool.clone(), keystore.clone(), - on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, - backend.offchain_storage(), system_rpc_tx.clone() - ); + let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware| { + gen_handler( + deny_unsafe, + rpc_middleware, + &config, + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + keystore.clone(), + on_demand.clone(), + remote_blockchain.clone(), + &*rpc_extensions_builder, + backend.offchain_storage(), + system_rpc_tx.clone(), + ) + }; let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone())?; // This is used internally, so don't restrict access to unsafe RPC - let rpc_handlers = RpcHandlers(Arc::new(gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics, "inbrowser") - ).into())); + let rpc_handlers = RpcHandlers(Arc::new( + gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics, "inbrowser"), + ) + .into(), + )); // Spawn informant task - spawn_handle.spawn("informant", sc_informant::build( - client.clone(), - network.clone(), - transaction_pool.clone(), - config.informant_output_format, - )); + spawn_handle.spawn( + "informant", + sc_informant::build( + client.clone(), + network.clone(), + transaction_pool.clone(), + config.informant_output_format, + ), + ); task_manager.keep_alive((config.base_path, rpc, rpc_handlers.clone())); @@ -692,10 +678,9 @@ async fn transaction_notifications( transaction_pool: Arc, network: Arc::Hash>>, telemetry: Option, -) - where - TBl: BlockT, - TExPool: MaintainedTransactionPool::Hash>, +) where + TBl: BlockT, + TExPool: MaintainedTransactionPool::Hash>, { // transaction notifications transaction_pool @@ -730,9 +715,11 @@ fn init_telemetry>( chain: config.chain_spec.name().to_owned(), genesis_hash: format!("{:?}", genesis_hash), authority: config.role.is_authority(), - startup_time: SystemTime::UNIX_EPOCH.elapsed() + startup_time: SystemTime::UNIX_EPOCH + .elapsed() .map(|dur| dur.as_millis()) - .unwrap_or(0).to_string(), + 
.unwrap_or(0) + .to_string(), network_id: network.local_peer_id().to_base58(), }; @@ -753,22 +740,28 @@ fn gen_handler( remote_blockchain: Option>>, rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), offchain_storage: Option<>::OffchainStorage>, - system_rpc_tx: TracingUnboundedSender> + system_rpc_tx: TracingUnboundedSender>, ) -> sc_rpc_server::RpcHandler - where - TBl: BlockT, - TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + - HeaderMetadata + ExecutorProvider + - CallApiAt + ProofProvider + - StorageProvider + BlockBackend + Send + Sync + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, - TBackend: sc_client_api::backend::Backend + 'static, - TRpc: sc_rpc::RpcExtension, - >::Api: - sp_session::SessionKeys + - sp_api::Metadata, +where + TBl: BlockT, + TCl: ProvideRuntimeApi + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + ExecutorProvider + + CallApiAt + + ProofProvider + + StorageProvider + + BlockBackend + + Send + + Sync + + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TBackend: sc_client_api::backend::Backend + 'static, + TRpc: sc_rpc::RpcExtension, + >::Api: sp_session::SessionKeys + sp_api::Metadata, { - use sc_rpc::{chain, state, author, system, offchain}; + use sc_rpc::{author, chain, offchain, state, system}; let system_info = sc_rpc::system::SystemInfo { chain_name: config.chain_spec.name().into(), @@ -781,43 +774,37 @@ fn gen_handler( let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); - let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) = - (remote_blockchain, on_demand) { - // Light clients - let chain = sc_rpc::chain::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand.clone(), - ); - let (state, child_state) = sc_rpc::state::new_light( - client.clone(), - subscriptions.clone(), - remote_blockchain.clone(), - on_demand, - deny_unsafe, - ); - (chain, state, child_state) - - } else { - // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let (state, child_state) = sc_rpc::state::new_full( - client.clone(), - subscriptions.clone(), - deny_unsafe, - config.rpc_max_payload, - ); - (chain, state, child_state) - }; + let (chain, state, child_state) = + if let (Some(remote_blockchain), Some(on_demand)) = (remote_blockchain, on_demand) { + // Light clients + let chain = sc_rpc::chain::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand.clone(), + ); + let (state, child_state) = sc_rpc::state::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand, + deny_unsafe, + ); + (chain, state, child_state) + } else { + // Full nodes + let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let (state, child_state) = sc_rpc::state::new_full( + client.clone(), + subscriptions.clone(), + deny_unsafe, + config.rpc_max_payload, + ); + (chain, state, child_state) + }; - let author = sc_rpc::author::Author::new( - client, - transaction_pool, - subscriptions, - keystore, - deny_unsafe, - ); + let author = + sc_rpc::author::Author::new(client, transaction_pool, subscriptions, keystore, deny_unsafe); let system = system::System::new(system_info, system_rpc_tx, deny_unsafe); let maybe_offchain_rpc = offchain_storage.map(|storage| { @@ -835,7 +822,7 @@ fn gen_handler( system::SystemApi::to_delegate(system), 
rpc_extensions_builder.build(deny_unsafe, task_executor), ), - rpc_middleware + rpc_middleware, ) } @@ -854,32 +841,42 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// An optional, shared data fetcher for light clients. pub on_demand: Option>>, /// A block announce validator builder. - pub block_announce_validator_builder: Option) -> Box + Send> + Send - >>, + pub block_announce_validator_builder: + Option) -> Box + Send> + Send>>, } /// Build the network service, the network status sinks and an RPC sender. pub fn build_network( - params: BuildNetworkParams + params: BuildNetworkParams, ) -> Result< ( Arc::Hash>>, TracingUnboundedSender>, NetworkStarter, ), - Error + Error, > - where - TBl: BlockT, - TCl: ProvideRuntimeApi + HeaderMetadata + Chain + - BlockBackend + BlockIdTo + ProofProvider + - HeaderBackend + BlockchainEvents + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, - TImpQu: ImportQueue + 'static, +where + TBl: BlockT, + TCl: ProvideRuntimeApi + + HeaderMetadata + + Chain + + BlockBackend + + BlockIdTo + + ProofProvider + + HeaderBackend + + BlockchainEvents + + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TImpQu: ImportQueue + 'static, { let BuildNetworkParams { - config, client, transaction_pool, spawn_handle, import_queue, on_demand, + config, + client, + transaction_pool, + spawn_handle, + import_queue, + on_demand, block_announce_validator_builder, } = params; @@ -906,8 +903,8 @@ pub fn build_network( let (handler, protocol_config) = BlockRequestHandler::new( &protocol_id, client.clone(), - config.network.default_peers_set.in_peers as usize - + config.network.default_peers_set.out_peers as usize, + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, ); spawn_handle.spawn("block_request_handler", handler.run()); protocol_config @@ -923,8 +920,8 @@ pub fn build_network( let (handler, protocol_config) = StateRequestHandler::new( &protocol_id, client.clone(), - config.network.default_peers_set.in_peers as usize - + config.network.default_peers_set.out_peers as usize, + config.network.default_peers_set.in_peers as usize + + config.network.default_peers_set.out_peers as usize, ); spawn_handle.spawn("state_request_handler", handler.run()); protocol_config @@ -937,10 +934,8 @@ pub fn build_network( light_client_requests::generate_protocol_config(&protocol_id) } else { // Allow both outgoing and incoming requests. - let (handler, protocol_config) = LightClientRequestHandler::new( - &protocol_id, - client.clone(), - ); + let (handler, protocol_config) = + LightClientRequestHandler::new(&protocol_id, client.clone()); spawn_handle.spawn("light_client_request_handler", handler.run()); protocol_config } @@ -962,7 +957,7 @@ pub fn build_network( }, network_config: config.network.clone(), chain: client.clone(), - on_demand: on_demand, + on_demand, transaction_pool: transaction_pool_adapter as _, import_queue: Box::new(import_queue), protocol_id, @@ -976,10 +971,8 @@ pub fn build_network( // Storage chains don't keep full block history and can't be synced in full mode. // Force fast sync when storage chain mode is enabled. 
if matches!(config.transaction_storage, TransactionStorageMode::StorageChain) { - network_params.network_config.sync_mode = SyncMode::Fast { - storage_chain_mode: true, - skip_proofs: false, - }; + network_params.network_config.sync_mode = + SyncMode::Fast { storage_chain_mode: true, skip_proofs: false }; } let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); @@ -1028,7 +1021,7 @@ pub fn build_network( ); // This `return` might seem unnecessary, but we don't want to make it look like // everything is working as normal even though the user is clearly misusing the API. - return; + return } future.await diff --git a/substrate/client/service/src/chain_ops/check_block.rs b/substrate/client/service/src/chain_ops/check_block.rs index 94f6d25c9eb8f2ed6f2cc9c3e8285da66adef730..ab924a3f7d9ddb3d4875c4bc23a992f956fe509b 100644 --- a/substrate/client/service/src/chain_ops/check_block.rs +++ b/substrate/client/service/src/chain_ops/check_block.rs @@ -17,22 +17,20 @@ // along with this program. If not, see . use crate::error::Error; -use futures::{future, prelude::*}; -use sp_runtime::traits::Block as BlockT; -use sp_runtime::generic::BlockId; use codec::Encode; -use sp_consensus::import_queue::ImportQueue; +use futures::{future, prelude::*}; use sc_client_api::{BlockBackend, UsageProvider}; +use sp_consensus::import_queue::ImportQueue; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use std::pin::Pin; -use std::sync::Arc; use crate::chain_ops::import_blocks; +use std::{pin::Pin, sync::Arc}; /// Re-validate known block. pub fn check_block( client: Arc, import_queue: IQ, - block_id: BlockId + block_id: BlockId, ) -> Pin> + Send>> where C: BlockBackend + UsageProvider + Send + Sync + 'static, @@ -46,7 +44,7 @@ where block.encode_to(&mut buf); let reader = std::io::Cursor::new(buf); import_blocks(client, import_queue, reader, true, true) - } + }, Ok(None) => Box::pin(future::err("Unknown block".into())), Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), } diff --git a/substrate/client/service/src/chain_ops/export_blocks.rs b/substrate/client/service/src/chain_ops/export_blocks.rs index 1d9325d1d7452f3318f73a6935167a5da9ad8ee2..8887180103182314e3cf12cef9c0766e511f4f29 100644 --- a/substrate/client/service/src/chain_ops/export_blocks.rs +++ b/substrate/client/service/src/chain_ops/export_blocks.rs @@ -17,18 +17,16 @@ // along with this program. If not, see . use crate::error::Error; -use log::info; +use codec::Encode; use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, One, Zero, SaturatedConversion +use log::info; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor, One, SaturatedConversion, Zero}, }; -use sp_runtime::generic::BlockId; -use codec::Encode; -use std::{io::Write, pin::Pin}; use sc_client_api::{BlockBackend, UsageProvider}; -use std::sync::Arc; -use std::task::Poll; +use std::{io::Write, pin::Pin, sync::Arc, task::Poll}; /// Performs the blocks export. pub fn export_blocks( @@ -36,7 +34,7 @@ pub fn export_blocks( mut output: impl Write + 'static, from: NumberFor, to: Option>, - binary: bool + binary: bool, ) -> Pin>>> where C: BlockBackend + UsageProvider + 'static, @@ -63,7 +61,7 @@ where let client = &client; if last < block { - return Poll::Ready(Err("Invalid block range specified".into())); + return Poll::Ready(Err("Invalid block range specified".into())) } if !wrote_header { @@ -78,14 +76,13 @@ where } match client.block(&BlockId::number(block))? 
{ - Some(block) => { + Some(block) => if binary { output.write_all(&block.encode())?; } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, + }, // Reached end of the chain. None => return Poll::Ready(Ok(())), } @@ -93,7 +90,7 @@ where info!("#{}", block); } if block == last { - return Poll::Ready(Ok(())); + return Poll::Ready(Ok(())) } block += One::one(); diff --git a/substrate/client/service/src/chain_ops/export_raw_state.rs b/substrate/client/service/src/chain_ops/export_raw_state.rs index 71822cf6275f8d025e3e864a376ca1d6e1472d95..975149c61cfab9dc8bff8f569ec79d38f39eeb77 100644 --- a/substrate/client/service/src/chain_ops/export_raw_state.rs +++ b/substrate/client/service/src/chain_ops/export_raw_state.rs @@ -17,10 +17,9 @@ // along with this program. If not, see . use crate::error::Error; -use sp_runtime::traits::Block as BlockT; -use sp_runtime::generic::BlockId; -use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; use sc_client_api::{StorageProvider, UsageProvider}; +use sp_core::storage::{well_known_keys, ChildInfo, Storage, StorageChild, StorageKey, StorageMap}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use std::{collections::HashMap, sync::Arc}; @@ -35,9 +34,7 @@ where B: BlockT, BA: sc_client_api::backend::Backend, { - let block = block.unwrap_or_else( - || BlockId::Hash(client.usage_info().chain.best_hash) - ); + let block = block.unwrap_or_else(|| BlockId::Hash(client.usage_info().chain.best_hash)); let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; @@ -47,12 +44,12 @@ where // pairs. while let Some(pos) = top_storage .iter() - .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { + .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) + { let (key, _) = top_storage.swap_remove(pos); - let key = StorageKey( - key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), - ); + let key = + StorageKey(key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec()); let child_info = ChildInfo::new_default(&key.0); let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; diff --git a/substrate/client/service/src/chain_ops/import_blocks.rs b/substrate/client/service/src/chain_ops/import_blocks.rs index 75ea6670f352536dc0463a0c28fb5b62bb4d218b..ecf028ffeb3f0e5846fd063163914f38c2c00f83 100644 --- a/substrate/client/service/src/chain_ops/import_blocks.rs +++ b/substrate/client/service/src/chain_ops/import_blocks.rs @@ -16,29 +16,31 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::error; -use crate::error::Error; -use sc_chain_spec::ChainSpec; -use log::{warn, info}; -use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, Zero, Header, MaybeSerializeDeserialize, -}; -use sp_runtime::generic::SignedBlock; +use crate::{error, error::Error}; use codec::{Decode, IoReader as CodecIoReader}; +use futures::{future, prelude::*}; +use log::{info, warn}; +use sc_chain_spec::ChainSpec; use sp_consensus::{ + import_queue::{BlockImportError, BlockImportResult, ImportQueue, IncomingBlock, Link}, BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, +}; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header, MaybeSerializeDeserialize, NumberFor, Zero}, }; -use std::{io::{Read, Seek}, pin::Pin}; -use std::time::{Duration, Instant}; use futures_timer::Delay; -use std::task::Poll; +use sc_client_api::UsageProvider; use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; -use std::convert::{TryFrom, TryInto}; use sp_runtime::traits::{CheckedDiv, Saturating}; -use sc_client_api::UsageProvider; +use std::{ + convert::{TryFrom, TryInto}, + io::{Read, Seek}, + pin::Pin, + task::Poll, + time::{Duration, Instant}, +}; /// Number of blocks we will add to the queue before waiting for the queue to catch up. const MAX_PENDING_BLOCKS: u64 = 1_024; @@ -56,11 +58,11 @@ pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { spec.as_json(raw).map_err(Into::into) } - /// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder /// (from serde_json). Implements the Iterator Trait, calling `next()` will decode the next /// SignedBlock and return it. -enum BlockIter where +enum BlockIter +where R: std::io::Read + std::io::Seek, { Binary { @@ -79,7 +81,8 @@ enum BlockIter where }, } -impl BlockIter where +impl BlockIter +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { @@ -90,40 +93,32 @@ impl BlockIter where // of blocks that are going to be decoded. We read it and add it to our enum struct. let num_expected_blocks: u64 = Decode::decode(&mut reader) .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; - Ok(BlockIter::Binary { - num_expected_blocks, - read_block_count: 0, - reader, - }) + Ok(BlockIter::Binary { num_expected_blocks, read_block_count: 0, reader }) } else { - let stream_deser = Deserializer::from_reader(input) - .into_iter::>(); - Ok(BlockIter::Json { - reader: stream_deser, - read_block_count: 0, - }) + let stream_deser = Deserializer::from_reader(input).into_iter::>(); + Ok(BlockIter::Json { reader: stream_deser, read_block_count: 0 }) } } /// Returns the number of blocks read thus far. fn read_block_count(&self) -> u64 { match self { - BlockIter::Binary { read_block_count, .. } - | BlockIter::Json { read_block_count, .. } - => *read_block_count, + BlockIter::Binary { read_block_count, .. } | + BlockIter::Json { read_block_count, .. } => *read_block_count, } } /// Returns the total number of blocks to be imported, if possible. fn num_expected_blocks(&self) -> Option { match self { - BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), - BlockIter::Json {..} => None + BlockIter::Binary { num_expected_blocks, .. } => Some(*num_expected_blocks), + BlockIter::Json { .. 
} => None, } } } -impl Iterator for BlockIter where +impl Iterator for BlockIter +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { @@ -133,20 +128,20 @@ impl Iterator for BlockIter where match self { BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { if read_block_count < num_expected_blocks { - let block_result: Result, _> = SignedBlock::::decode(reader) - .map_err(|e| e.to_string()); + let block_result: Result, _> = + SignedBlock::::decode(reader).map_err(|e| e.to_string()); *read_block_count += 1; Some(block_result) } else { // `read_block_count` == `num_expected_blocks` so we've read enough blocks. None } - } + }, BlockIter::Json { reader, read_block_count } => { let res = Some(reader.next()?.map_err(|e| e.to_string())); *read_block_count += 1; res - } + }, } } } @@ -155,7 +150,7 @@ impl Iterator for BlockIter where fn import_block_to_queue( signed_block: SignedBlock, queue: &mut TImpQu, - force: bool + force: bool, ) where TBl: BlockT + MaybeSerializeDeserialize, TImpQu: 'static + ImportQueue, @@ -163,8 +158,9 @@ fn import_block_to_queue( let (header, extrinsics) = signed_block.block.deconstruct(); let hash = header.hash(); // import queue handles verification and importing it into the client. - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { + queue.import_blocks( + BlockOrigin::File, + vec![IncomingBlock:: { hash, header: Some(header), body: Some(extrinsics), @@ -175,15 +171,15 @@ fn import_block_to_queue( import_existing: force, state: None, skip_execution: false, - } - ]); + }], + ); } /// Returns true if we have imported every block we were supposed to import, else returns false. fn importing_is_done( num_expected_blocks: Option, read_block_count: u64, - imported_blocks: u64 + imported_blocks: u64, ) -> bool { if let Some(num_expected_blocks) = num_expected_blocks { imported_blocks >= num_expected_blocks @@ -209,7 +205,7 @@ impl Speedometer { } } - /// Calculates `(best_number - last_number) / (now - last_update)` and + /// Calculates `(best_number - last_number) / (now - last_update)` and /// logs the speed of import. fn display_speed(&self) { // Number of milliseconds elapsed since last time. @@ -223,24 +219,28 @@ impl Speedometer { // Number of blocks that have been imported since last time. let diff = match self.last_number { None => return, - Some(n) => self.best_number.saturating_sub(n) + Some(n) => self.best_number.saturating_sub(n), }; if let Ok(diff) = TryInto::::try_into(diff) { // If the number of blocks can be converted to a regular integer, then it's easy: just // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; + let speed = diff + .saturating_mul(10_000) + .checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / + 10.0; info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); } else { // If the number of blocks can't be converted to a regular integer, then we need a more // algebraic approach and we stay within the realm of integers. 
let one_thousand = NumberFor::::from(1_000u32); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::MAX) - ); + let elapsed = + NumberFor::::from(>::try_from(elapsed_ms).unwrap_or(u32::MAX)); - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + let speed = diff + .saturating_mul(one_thousand) + .checked_div(&elapsed) .unwrap_or_else(Zero::zero); info!("📦 Current best block: {} ({} bps)", self.best_number, speed) } @@ -265,22 +265,23 @@ impl Speedometer { } /// Different State that the `import_blocks` future could be in. -enum ImportState where +enum ImportState +where R: Read + Seek + 'static, B: BlockT + MaybeSerializeDeserialize, { /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. - Reading{block_iter: BlockIter}, + Reading { block_iter: BlockIter }, /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to /// catch up. - WaitingForImportQueueToCatchUp{ + WaitingForImportQueueToCatchUp { block_iter: BlockIter, delay: Delay, - block: SignedBlock + block: SignedBlock, }, // We have added all the blocks to the queue but they are still being processed. - WaitingForImportQueueToFinish{ - num_expected_blocks: Option, + WaitingForImportQueueToFinish { + num_expected_blocks: Option, read_block_count: u64, delay: Delay, }, @@ -306,10 +307,7 @@ where impl WaitLink { fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, - } + WaitLink { imported_blocks: 0, has_error: false } } } @@ -318,7 +316,7 @@ where &mut self, imported: usize, _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { self.imported_blocks += imported as u64; @@ -326,7 +324,7 @@ where if let (Err(err), hash) = result { warn!("There was an error importing block with hash {:?}: {:?}", hash, err); self.has_error = true; - break; + break } } } @@ -338,13 +336,13 @@ where let block_iter = match block_iter_res { Ok(block_iter) => block_iter, Err(e) => { - // We've encountered an error while creating the block iterator + // We've encountered an error while creating the block iterator // so we can just return a future that returns an error. return future::ready(Err(Error::Other(e))).boxed() - } + }, }; - let mut state = Some(ImportState::Reading{block_iter}); + let mut state = Some(ImportState::Reading { block_iter }); let mut speedometer = Speedometer::::new(); // Importing blocks is implemented as a future, because we want the operation to be @@ -358,7 +356,7 @@ where let client = &client; let queue = &mut import_queue; match state.take().expect("state should never be None; qed") { - ImportState::Reading{mut block_iter} => { + ImportState::Reading { mut block_iter } => { match block_iter.next() { None => { // The iterator is over: we now need to wait for the import queue to finish. @@ -366,7 +364,9 @@ where let read_block_count = block_iter.read_block_count(); let delay = Delay::new(Duration::from_millis(DELAY_TIME)); state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); }, Some(block_result) => { @@ -378,32 +378,35 @@ where // until the queue has made some progress. 
let delay = Delay::new(Duration::from_millis(DELAY_TIME)); state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); } else { // Queue is not full, we can keep on adding blocks to the queue. import_block_to_queue(block, queue, force); - state = Some(ImportState::Reading{block_iter}); + state = Some(ImportState::Reading { block_iter }); } - } - Err(e) => { - return Poll::Ready( - Err(Error::Other( - format!("Error reading block #{}: {}", read_block_count, e) - ))) - } + }, + Err(e) => + return Poll::Ready(Err(Error::Other(format!( + "Error reading block #{}: {}", + read_block_count, e + )))), } - } + }, } }, - ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { + ImportState::WaitingForImportQueueToCatchUp { block_iter, mut delay, block } => { let read_block_count = block_iter.read_block_count(); if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { // Queue is still full, so wait until there is room to insert our block. match Pin::new(&mut delay).poll(cx) { Poll::Pending => { state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); return Poll::Pending }, @@ -412,25 +415,30 @@ where }, } state = Some(ImportState::WaitingForImportQueueToCatchUp { - block_iter, delay, block + block_iter, + delay, + block, }); } else { // Queue is no longer full, so we can add our block to the queue. import_block_to_queue(block, queue, force); // Switch back to Reading state. - state = Some(ImportState::Reading{block_iter}); + state = Some(ImportState::Reading { block_iter }); } }, ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, mut delay + num_expected_blocks, + read_block_count, + mut delay, } => { - // All the blocks have been added to the queue, which doesn't mean they + // All the blocks have been added to the queue, which doesn't mean they // have all been properly imported. if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { // Importing is done, we can log the result and return. info!( "🎉 Imported {} blocks. 
Best: #{}", - read_block_count, client.usage_info().chain.best_number + read_block_count, + client.usage_info().chain.best_number ); return Poll::Ready(Ok(())) } else { @@ -439,7 +447,9 @@ where match Pin::new(&mut delay).poll(cx) { Poll::Pending => { state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); return Poll::Pending }, @@ -449,10 +459,12 @@ where } state = Some(ImportState::WaitingForImportQueueToFinish { - num_expected_blocks, read_block_count, delay + num_expected_blocks, + read_block_count, + delay, }); } - } + }, } queue.poll_actions(cx, &mut link); @@ -461,11 +473,10 @@ where speedometer.notify_user(best_number); if link.has_error { - return Poll::Ready(Err( - Error::Other( - format!("Stopping after #{} blocks because of an error", link.imported_blocks) - ) - )) + return Poll::Ready(Err(Error::Other(format!( + "Stopping after #{} blocks because of an error", + link.imported_blocks + )))) } cx.waker().wake_by_ref(); diff --git a/substrate/client/service/src/chain_ops/revert_chain.rs b/substrate/client/service/src/chain_ops/revert_chain.rs index e3301eb2627e2bc2db80d4960e4189639493baff..63f1cbd15dd6336bd97f63f7fccdf6b781340ffa 100644 --- a/substrate/client/service/src/chain_ops/revert_chain.rs +++ b/substrate/client/service/src/chain_ops/revert_chain.rs @@ -18,15 +18,15 @@ use crate::error::Error; use log::info; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use sc_client_api::{Backend, UsageProvider}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::sync::Arc; /// Performs a revert of `blocks` blocks. pub fn revert_chain( client: Arc, backend: Arc, - blocks: NumberFor + blocks: NumberFor, ) -> Result<(), Error> where B: BlockT, diff --git a/substrate/client/service/src/client/block_rules.rs b/substrate/client/service/src/client/block_rules.rs index 1af06666339ccc49433c59add55c85f73303db06..4bdf33836296000710117f4a8669ef8b678b4c05 100644 --- a/substrate/client/service/src/client/block_rules.rs +++ b/substrate/client/service/src/client/block_rules.rs @@ -20,11 +20,9 @@ use std::collections::{HashMap, HashSet}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sc_client_api::{ForkBlocks, BadBlocks}; +use sc_client_api::{BadBlocks, ForkBlocks}; /// Chain specification rules lookup result. pub enum LookupResult { @@ -33,7 +31,7 @@ pub enum LookupResult { /// The block is known to be bad and should not be imported KnownBad, /// There is a specified canonical block hash for the given height - Expected(B::Hash) + Expected(B::Hash), } /// Chain-specific block filtering rules. @@ -47,10 +45,7 @@ pub struct BlockRules { impl BlockRules { /// New block rules with provided black and white lists. 
- pub fn new( - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - ) -> Self { + pub fn new(fork_blocks: ForkBlocks, bad_blocks: BadBlocks) -> Self { Self { bad: bad_blocks.unwrap_or_else(|| HashSet::new()), forks: fork_blocks.unwrap_or_else(|| vec![]).into_iter().collect(), @@ -66,7 +61,7 @@ impl BlockRules { pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { if let Some(hash_for_height) = self.forks.get(&number) { if hash_for_height != hash { - return LookupResult::Expected(hash_for_height.clone()); + return LookupResult::Expected(hash_for_height.clone()) } } diff --git a/substrate/client/service/src/client/call_executor.rs b/substrate/client/service/src/client/call_executor.rs index a4448199476078a0dbbb39395728af670765a802..6d4fe3c36013138f1efd9cf639119d0505e04a98 100644 --- a/substrate/client/service/src/client/call_executor.rs +++ b/substrate/client/service/src/client/call_executor.rs @@ -16,23 +16,25 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use codec::{Decode, Encode}; +use sc_client_api::{backend, call_executor::CallExecutor}; +use sc_executor::{NativeVersion, RuntimeInfo, RuntimeVersion}; +use sp_api::{ProofRecorder, StorageTransactionCache}; +use sp_core::{ + traits::{CodeExecutor, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, +}; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, + generic::BlockId, + traits::{Block as BlockT, HashFor, NumberFor}, }; use sp_state_machine::{ - self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, + self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + StateMachine, StorageProof, }; -use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; -use sp_externalities::Extensions; -use sp_core::{ - NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed, RuntimeCode}, -}; -use sp_api::{ProofRecorder, StorageTransactionCache}; -use sc_client_api::{backend, call_executor::CallExecutor}; -use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; /// Call executor that executes methods locally, querying all required /// data from local backend. 
@@ -57,7 +59,8 @@ where spawn_handle: Box, client_config: ClientConfig, ) -> sp_blockchain::Result { - let wasm_override = client_config.wasm_runtime_overrides + let wasm_override = client_config + .wasm_runtime_overrides .as_ref() .map(|p| WasmOverride::new(p.clone(), executor.clone())) .transpose()?; @@ -91,10 +94,12 @@ where B: backend::Backend, { let spec = self.runtime_version(id)?.spec_version; - let code = if let Some(d) = self.wasm_override + let code = if let Some(d) = self + .wasm_override .as_ref() .map(|o| o.get(&spec, onchain_code.heap_pages)) - .flatten() { + .flatten() + { log::debug!(target: "wasm_overrides", "using WASM override for block {}", id); d } else if let Some(s) = self.wasm_substitutes.get(spec, onchain_code.heap_pages, id) { @@ -113,7 +118,10 @@ where } } -impl Clone for LocalCallExecutor where E: Clone { +impl Clone for LocalCallExecutor +where + E: Clone, +{ fn clone(&self) -> Self { LocalCallExecutor { backend: self.backend.clone(), @@ -145,13 +153,12 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let changes_trie = backend::changes_tries_state_at_block( - id, self.backend.changes_trie_storage() - )?; + let changes_trie = + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let state = self.backend.state_at(*id)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = state_runtime_code.runtime_code() - .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, id)?; let return_data = StateMachine::new( @@ -164,7 +171,8 @@ where extensions.unwrap_or_default(), &runtime_code, self.spawn_handle.clone(), - ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + ) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, )?; @@ -175,7 +183,7 @@ where fn contextual_call< EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, @@ -185,15 +193,17 @@ where method: &str, call_data: &[u8], changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - StorageTransactionCache - >>, + storage_transaction_cache: Option<&RefCell>>, execution_manager: ExecutionManager, native_call: Option, recorder: &Option>, extensions: Option, - ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { - let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; + ) -> Result, sp_blockchain::Error> + where + ExecutionManager: Clone, + { + let changes_trie_state = + backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); let mut state = self.backend.state_at(*at)?; @@ -202,16 +212,17 @@ where match recorder { Some(recorder) => { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_state); + let state_runtime_code = + 
sp_state_machine::backend::BackendRuntimeCode::new(trie_state); // It is important to extract the runtime code here before we create the proof // recorder. - let runtime_code = state_runtime_code.runtime_code() - .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; let backend = sp_state_machine::ProvingBackend::new_with_recorder( @@ -239,8 +250,8 @@ where }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = state_runtime_code.runtime_code() - .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; let mut state_machine = StateMachine::new( @@ -253,34 +264,31 @@ where extensions.unwrap_or_default(), &runtime_code, self.spawn_handle.clone(), - ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); + ) + .with_storage_transaction_cache( + storage_transaction_cache.as_mut().map(|c| &mut **c), + ); state_machine.execute_using_consensus_failure_handler( execution_manager, native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), ) - } - }.map_err(Into::into) + }, + } + .map_err(Into::into) } fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let changes_trie_state = backend::changes_tries_state_at_block( - id, - self.backend.changes_trie_storage(), - )?; + let changes_trie_state = + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let state = self.backend.state_at(*id)?; let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &state, - changes_trie_state, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &state, changes_trie_state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = state_runtime_code.runtime_code() - .map_err(sp_blockchain::Error::RuntimeCode)?; - self.executor.runtime_version(&mut ext, &runtime_code) + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + self.executor + .runtime_version(&mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } @@ -289,11 +297,11 @@ where trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_state); - let runtime_code = state_runtime_code.runtime_code() - .map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( trie_state, overlay, @@ -312,19 +320,16 @@ where } impl sp_version::GetRuntimeVersion for LocalCallExecutor - where - B: backend::Backend, - E: CodeExecutor + RuntimeInfo + Clone + 'static, - Block: BlockT, +where + B: backend::Backend, + E: CodeExecutor + RuntimeInfo + Clone + 'static, + Block: BlockT, { fn native_version(&self) -> &sp_version::NativeVersion { self.executor.native_version() } - fn runtime_version( - &self, - at: &BlockId, - 
) -> Result { + fn runtime_version(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) } } @@ -332,10 +337,13 @@ impl sp_version::GetRuntimeVersion for LocalCallExecutor = Mutex>>; /// Substrate Client -pub struct Client where Block: BlockT { +pub struct Client +where + Block: BlockT, +{ backend: Arc, executor: E, storage_notifications: Mutex>, @@ -157,7 +153,7 @@ enum PrepareStorageChangesResult, Block: BlockT> { } /// Create an instance of in-memory client. -#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] pub fn new_in_mem( executor: E, genesis_storage: &S, @@ -166,12 +162,10 @@ pub fn new_in_mem( telemetry: Option, spawn_handle: Box, config: ClientConfig, -) -> sp_blockchain::Result, - LocalCallExecutor, E>, - Block, - RA ->> where +) -> sp_blockchain::Result< + Client, LocalCallExecutor, E>, Block, RA>, +> +where E: CodeExecutor + RuntimeInfo, S: BuildStorage, Block: BlockT, @@ -218,7 +212,7 @@ impl Default for ClientConfig { /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. -#[cfg(feature="test-helpers")] +#[cfg(feature = "test-helpers")] pub fn new_with_backend( backend: Arc, executor: E, @@ -229,13 +223,14 @@ pub fn new_with_backend( telemetry: Option, config: ClientConfig, ) -> sp_blockchain::Result, Block, RA>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, - B: backend::LocalBackend + 'static, +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, + B: backend::LocalBackend + 'static, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone())?; + let call_executor = + LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone())?; let extensions = ExecutionExtensions::new( Default::default(), keystore, @@ -254,7 +249,8 @@ pub fn new_with_backend( ) } -impl BlockOf for Client where +impl BlockOf for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -263,15 +259,15 @@ impl BlockOf for Client where } impl LockImportRun for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, { let inner = || { let _import_lock = self.backend.get_import_lock().write(); @@ -301,21 +297,22 @@ impl LockImportRun for Client } impl LockImportRun for &Client - where - Block: BlockT, - B: backend::Backend, - E: CallExecutor, +where + Block: BlockT, + B: backend::Backend, + E: CallExecutor, { fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, { (**self).lock_import_and_run(f) } } -impl Client where +impl Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -335,12 +332,13 @@ impl Client where ) -> sp_blockchain::Result { let info = backend.blockchain().info(); if info.finalized_state.is_none() { - let genesis_storage = build_genesis_storage.build_storage() - .map_err(sp_blockchain::Error::Storage)?; + let genesis_storage = + build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis)?; 
let genesis_block = genesis::construct_genesis_block::(state_root.into()); - info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", + info!( + "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash() ); @@ -396,8 +394,11 @@ impl Client where /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? - .expect("None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed").0) + .expect( + "None is returned if there's no value stored for the given key;\ + ':code' key is always defined; qed", + ) + .0) } /// Get the RuntimeVersion at a given block. @@ -411,7 +412,9 @@ impl Client where id: &BlockId, cht_size: NumberFor, ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)); + let proof_error = || { + sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)) + }; let header = self.backend.blockchain().expect_header(*id)?; let block_num = *header.number(); let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; @@ -449,12 +452,13 @@ impl Client where required_roots_proofs: Mutex, Block::Hash>>, } - impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> + impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> { - fn build_anchor(&self, hash: Block::Hash) - -> Result>, String> - { + fn build_anchor( + &self, + hash: Block::Hash, + ) -> Result>, String> { self.storage.build_anchor(hash) } @@ -466,22 +470,19 @@ impl Client where let root = self.storage.root(anchor, block)?; if block < self.min { if let Some(ref root) = root { - self.required_roots_proofs.lock().insert( - block, - root.clone() - ); + self.required_roots_proofs.lock().insert(block, root.clone()); } } Ok(root) } } - impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> + impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> { - fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> - { + fn as_roots_storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { self } @@ -498,10 +499,11 @@ impl Client where } } - let first_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(first))?; + let first_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?; let (storage, configs) = self.require_changes_trie(first_number, last, true)?; - let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; + let min_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { storage: storage.storage(), @@ -517,8 +519,8 @@ impl Client where // fetch key changes proof let mut proof = Vec::new(); for (config_zero, config_end, config) in configs { - let last_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(last))?; + let last_number = + self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?; let config_range = ChangesTrieConfigurationRange { config: &config, zero: config_zero, @@ 
-528,10 +530,7 @@ impl Client where config_range, &recording_storage, first_number, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last), - number: last_number, - }, + &ChangesTrieAnchorBlockId { hash: convert_hash(&last), number: last_number }, max_number, storage_key, &key.0, @@ -554,20 +553,26 @@ impl Client where } /// Generate CHT-based proof for roots of changes tries at given blocks. - fn changes_trie_roots_proof>>( + fn changes_trie_roots_proof>>( &self, cht_size: NumberFor, - blocks: I + blocks: I, ) -> sp_blockchain::Result { // most probably we have touched several changes tries that are parts of the single CHT // => GroupBy changes tries by CHT number and then gather proof for the whole group at once let mut proofs = Vec::new(); - cht::for_each_cht_group::(cht_size, blocks, |_, cht_num, cht_blocks| { - let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; - proofs.push(cht_proof); - Ok(()) - }, ())?; + cht::for_each_cht_group::( + cht_size, + blocks, + |_, cht_num, cht_blocks| { + let cht_proof = + self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; + proofs.push(cht_proof); + Ok(()) + }, + (), + )?; Ok(StorageProof::merge(proofs)) } @@ -577,7 +582,7 @@ impl Client where &self, cht_size: NumberFor, cht_num: NumberFor, - blocks: Vec> + blocks: Vec>, ) -> sp_blockchain::Result { let cht_start = cht::start_number(cht_size, cht_num); let mut current_num = cht_start; @@ -586,16 +591,14 @@ impl Client where current_num = current_num + One::one(); Some(old_current_num) }); - let roots = cht_range - .map(|num| self.header(&BlockId::Number(num)) - .map(|block| - block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())) - ); + let roots = cht_range.map(|num| { + self.header(&BlockId::Number(num)).map(|block| { + block + .and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned()) + }) + }); let proof = cht::build_proof::, _, _>( - cht_size, - cht_num, - blocks, - roots, + cht_size, cht_num, blocks, roots, )?; Ok(proof) } @@ -616,7 +619,9 @@ impl Client where &dyn PrunableStateChangesTrieStorage, Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, )> { - let storage = self.backend.changes_trie_storage() + let storage = self + .backend + .changes_trie_storage() .ok_or_else(|| sp_blockchain::Error::ChangesTriesNotSupported)?; let mut configs = Vec::with_capacity(1); @@ -630,10 +635,14 @@ impl Client where } if config_range.zero.0 < first { - break; + break } - current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); + current = *self + .backend + .blockchain() + .expect_header(BlockId::Hash(config_range.zero.1))? + .parent_hash(); } Ok((storage, configs)) @@ -646,11 +655,14 @@ impl Client where operation: &mut ClientImportOperation, import_block: BlockImportParams>, new_cache: HashMap>, - storage_changes: Option>>, - ) -> sp_blockchain::Result where + storage_changes: Option< + sp_consensus::StorageChanges>, + >, + ) -> sp_blockchain::Result + where Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, { let BlockImportParams { origin, @@ -711,9 +723,7 @@ impl Client where // don't send telemetry block import events during initial sync for every // block to avoid spamming the telemetry server, these events will be randomly // sent at a rate of 1/10. 
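The sampling described in the comment above reduces to the following pattern (a sketch using the same `rand::thread_rng().gen_bool` call as the diff lines that follow):

use rand::Rng;

// Sketch: emit every event once fully synced; during initial sync, emit ~1 in 10
// so the telemetry server is not flooded.
fn should_emit_block_import_event(is_initial_sync: bool) -> bool {
    !is_initial_sync || rand::thread_rng().gen_bool(0.1)
}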
- if origin != BlockOrigin::NetworkInitialSync || - rand::thread_rng().gen_bool(0.1) - { + if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) { telemetry!( self.telemetry; SUBSTRATE_INFO; @@ -738,23 +748,26 @@ impl Client where justifications: Option, body: Option>, indexed_body: Option>>, - storage_changes: Option>>, + storage_changes: Option< + sp_consensus::StorageChanges>, + >, new_cache: HashMap>, finalized: bool, aux: Vec<(Vec, Option>)>, fork_choice: ForkChoiceStrategy, import_existing: bool, - ) -> sp_blockchain::Result where + ) -> sp_blockchain::Result + where Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, { let parent_hash = import_headers.post().parent_hash().clone(); let status = self.backend.blockchain().status(BlockId::Hash(hash))?; match (import_existing, status) { (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, - (true, blockchain::BlockStatus::InChain) => {}, + (true, blockchain::BlockStatus::InChain) => {}, (true, blockchain::BlockStatus::Unknown) => {}, } @@ -762,17 +775,18 @@ impl Client where // the block is lower than our last finalized block so it must revert // finality, refusing import. - if status == blockchain::BlockStatus::Unknown - && *import_headers.post().number() <= info.finalized_number + if status == blockchain::BlockStatus::Unknown && + *import_headers.post().number() <= info.finalized_number { - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } // this is a fairly arbitrary choice of where to draw the line on making notifications, // but the general goal is to only make notifications when we are already fully synced // and get a new chain head. let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => + true, BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, }; @@ -780,15 +794,10 @@ impl Client where Some(storage_changes) => { let storage_changes = match storage_changes { sp_consensus::StorageChanges::Changes(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; - let ( - main_sc, - child_sc, - offchain_sc, - tx, _, - changes_trie_tx, - tx_index, - ) = storage_changes.into_inner(); + self.backend + .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + let (main_sc, child_sc, offchain_sc, tx, _, changes_trie_tx, tx_index) = + storage_changes.into_inner(); if self.config.offchain_indexing_api { operation.op.update_offchain_storage(offchain_sc)?; @@ -803,7 +812,7 @@ impl Client where } Some((main_sc, child_sc)) - } + }, sp_consensus::StorageChanges::Import(changes) => { let storage = sp_storage::Storage { top: changes.state.into_iter().collect(), @@ -815,10 +824,10 @@ impl Client where // State root mismatch when importing state. This should not happen in safe fast sync mode, // but may happen in unsafe mode. 
warn!("Error imporing state: State root mismatch."); - return Err(Error::InvalidStateRoot); + return Err(Error::InvalidStateRoot) } None - } + }, }; // ensure parent block is finalized to maintain invariant that @@ -835,15 +844,16 @@ impl Client where operation.op.update_cache(new_cache); storage_changes - }, None => None, }; - let is_new_best = finalized || match fork_choice { - ForkChoiceStrategy::LongestChain => import_headers.post().number() > &info.best_number, - ForkChoiceStrategy::Custom(v) => v, - }; + let is_new_best = finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, + ForkChoiceStrategy::Custom(v) => v, + }; let leaf_state = if finalized { NewBlockState::Final @@ -854,11 +864,8 @@ impl Client where }; let tree_route = if is_new_best && info.best_hash != parent_hash { - let route_from_best = sp_blockchain::tree_route( - self.backend.blockchain(), - info.best_hash, - parent_hash, - )?; + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?; Some(route_from_best) } else { None @@ -910,20 +917,24 @@ impl Client where &self, import_block: &mut BlockImportParams>, ) -> sp_blockchain::Result> - where - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + where + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, { let parent_hash = import_block.header.parent_hash(); let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { - (BlockStatus::Unknown, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), - (BlockStatus::KnownBad, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (BlockStatus::KnownBad, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), (_, StateAction::Skip) => (false, None), - (BlockStatus::InChainPruned, StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(_))) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + ( + BlockStatus::InChainPruned, + StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(_)), + ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::Execute) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), @@ -953,19 +964,14 @@ impl Client where )?; let state = self.backend.state_at(at)?; - let changes_trie_state = changes_tries_state_at_block( - &at, - self.backend.changes_trie_storage(), - )?; + let changes_trie_state = + changes_tries_state_at_block(&at, self.backend.changes_trie_storage())?; - let gen_storage_changes = runtime_api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - *parent_hash, - ).map_err(sp_blockchain::Error::Storage)?; + let gen_storage_changes = runtime_api + .into_storage_changes(&state, changes_trie_state.as_ref(), *parent_hash) + .map_err(sp_blockchain::Error::Storage)?; - if import_block.header.state_root() - != &gen_storage_changes.transaction_storage_root + if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root { return Err(Error::InvalidStateRoot) } @@ -992,20 +998,28 @@ 
impl Client where let last_finalized = self.backend.blockchain().last_finalized()?; if block == last_finalized { - warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); - return Ok(()); + warn!( + "Possible safety violation: attempted to re-finalize last finalized block {:?} ", + last_finalized + ); + return Ok(()) } - let route_from_finalized = sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; + let route_from_finalized = + sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; if let Some(retracted) = route_from_finalized.retracted().get(0) { - warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ - same chain as last finalized {:?}", retracted, last_finalized); + warn!( + "Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", + retracted, last_finalized + ); - return Err(sp_blockchain::Error::NotInFinalizedChain); + return Err(sp_blockchain::Error::NotInFinalizedChain) } - let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; // if the block is not a direct ancestor of the current best chain, // then some other block is the common ancestor. @@ -1042,10 +1056,7 @@ impl Client where Ok(()) } - fn notify_finalized( - &self, - notify_finalized: Vec, - ) -> sp_blockchain::Result<()> { + fn notify_finalized(&self, notify_finalized: Vec) -> sp_blockchain::Result<()> { let mut sinks = self.finality_notification_sinks.lock(); if notify_finalized.is_empty() { @@ -1054,17 +1065,16 @@ impl Client where // would also remove any closed sinks. sinks.retain(|sink| !sink.is_closed()); - return Ok(()); + return Ok(()) } // We assume the list is sorted and only want to inform the // telemetry once about the finalized block. if let Some(last) = notify_finalized.last() { - let header = self.header(&BlockId::Hash(*last))? - .expect( - "Header already known to exist in DB because it is \ - indicated in the tree route; qed" - ); + let header = self.header(&BlockId::Hash(*last))?.expect( + "Header already known to exist in DB because it is \ + indicated in the tree route; qed", + ); telemetry!( self.telemetry; @@ -1076,16 +1086,12 @@ impl Client where } for finalized_hash in notify_finalized { - let header = self.header(&BlockId::Hash(finalized_hash))? - .expect( - "Header already known to exist in DB because it is \ - indicated in the tree route; qed" - ); + let header = self.header(&BlockId::Hash(finalized_hash))?.expect( + "Header already known to exist in DB because it is \ + indicated in the tree route; qed", + ); - let notification = FinalityNotification { - header, - hash: finalized_hash, - }; + let notification = FinalityNotification { header, hash: finalized_hash }; sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); } @@ -1106,22 +1112,19 @@ impl Client where // won't send any import notifications which could lead to a // temporary leak of closed/discarded notification sinks (e.g. // from consensus code). - self.import_notification_sinks - .lock() - .retain(|sink| !sink.is_closed()); + self.import_notification_sinks.lock().retain(|sink| !sink.is_closed()); - return Ok(()); - } + return Ok(()) + }, }; if let Some(storage_changes) = notify_import.storage_changes { // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? 
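The safety checks above refuse any finalization that would revert finality; at heart they are an ancestry test, sketched here with a plain child-to-parent map standing in for the blockchain backend:

use std::collections::HashMap;

// Finalizing `block` is only valid if it descends from `ancestor` (the last
// finalized block), i.e. the tree route between them retracts nothing.
fn descends_from(parent_of: &HashMap<u64, u64>, mut block: u64, ancestor: u64) -> bool {
    while block != ancestor {
        match parent_of.get(&block) {
            Some(&parent) => block = parent,
            // Reached a root without meeting `ancestor`: finalizing here would
            // revert a finalized block, which the client refuses.
            None => return false,
        }
    }
    true
}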
- self.storage_notifications.lock() - .trigger( - ¬ify_import.hash, - storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), - ); + self.storage_notifications.lock().trigger( + ¬ify_import.hash, + storage_changes.0.into_iter(), + storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + ); } let notification = BlockImportNotification:: { @@ -1132,7 +1135,8 @@ impl Client where tree_route: notify_import.tree_route.map(Arc::new), }; - self.import_notification_sinks.lock() + self.import_notification_sinks + .lock() .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); Ok(()) @@ -1179,7 +1183,7 @@ impl Client where // this can probably be implemented more efficiently if let BlockId::Hash(ref h) = id { if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); + return Ok(BlockStatus::Queued) } } let hash_and_number = match id.clone() { @@ -1187,24 +1191,29 @@ impl Client where BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), }; match hash_and_number { - Some((hash, number)) => { + Some((hash, number)) => if self.backend.have_state_at(&hash, number) { Ok(BlockStatus::InChainWithState) } else { Ok(BlockStatus::InChainPruned) - } - } + }, None => Ok(BlockStatus::Unknown), } } /// Get block header by id. - pub fn header(&self, id: &BlockId) -> sp_blockchain::Result::Header>> { + pub fn header( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Header>> { self.backend.blockchain().header(*id) } /// Get block body by id. - pub fn body(&self, id: &BlockId) -> sp_blockchain::Result::Extrinsic>>> { + pub fn body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { self.backend.blockchain().body(*id) } @@ -1215,13 +1224,15 @@ impl Client where max_generation: NumberFor, ) -> sp_blockchain::Result> { let load_header = |id: Block::Hash| -> sp_blockchain::Result { - self.backend.blockchain().header(BlockId::Hash(id))? + self.backend + .blockchain() + .header(BlockId::Hash(id))? .ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))) }; let genesis_hash = self.backend.blockchain().info().genesis_hash; if genesis_hash == target_hash { - return Ok(Vec::new()); + return Ok(Vec::new()) } let mut current_hash = target_hash; @@ -1237,7 +1248,7 @@ impl Client where current_hash = ancestor_hash; if genesis_hash == current_hash { - break; + break } current = ancestor; @@ -1250,21 +1261,20 @@ impl Client where } } -impl UsageProvider for Client where +impl UsageProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { /// Get usage info about current client. 
fn usage_info(&self) -> ClientInfo { - ClientInfo { - chain: self.chain_info(), - usage: self.backend.usage_info(), - } + ClientInfo { chain: self.chain_info(), usage: self.backend.usage_info() } } } -impl ProofProvider for Client where +impl ProofProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1272,29 +1282,26 @@ impl ProofProvider for Client where fn read_proof( &self, id: &BlockId, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result { - self.state_at(id) - .and_then(|state| prove_read(state, keys) - .map_err(Into::into)) + self.state_at(id).and_then(|state| prove_read(state, keys).map_err(Into::into)) } fn read_child_proof( &self, id: &BlockId, child_info: &ChildInfo, - keys: &mut dyn Iterator, + keys: &mut dyn Iterator, ) -> sp_blockchain::Result { self.state_at(id) - .and_then(|state| prove_child_read(state, child_info, keys) - .map_err(Into::into)) + .and_then(|state| prove_child_read(state, child_info, keys).map_err(Into::into)) } fn execution_proof( &self, id: &BlockId, method: &str, - call_data: &[u8] + call_data: &[u8], ) -> sp_blockchain::Result<(Vec, StorageProof)> { // Make sure we include the `:code` and `:heap_pages` in the execution proof to be // backwards compatible. @@ -1306,17 +1313,14 @@ impl ProofProvider for Client where )?; let state = self.state_at(id)?; - prove_execution( - state, - &self.executor, - method, - call_data, - ).map(|(r, p)| { - (r, StorageProof::merge(vec![p, code_proof])) - }) + prove_execution(state, &self.executor, method, call_data) + .map(|(r, p)| (r, StorageProof::merge(vec![p, code_proof]))) } - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { self.header_proof_with_cht_size(id, cht::size()) } @@ -1329,15 +1333,7 @@ impl ProofProvider for Client where storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result> { - self.key_changes_proof_with_cht_size( - first, - last, - min, - max, - storage_key, - key, - cht::size(), - ) + self.key_changes_proof_with_cht_size(first, last, min, max, storage_key, key, cht::size()) } fn read_proof_collection( @@ -1348,11 +1344,11 @@ impl ProofProvider for Client where ) -> sp_blockchain::Result<(StorageProof, u32)> { let state = self.state_at(id)?; Ok(prove_range_read_with_size::<_, HashFor>( - state, - None, - None, - size_limit, - Some(start_key) + state, + None, + None, + size_limit, + Some(start_key), )?) } @@ -1376,14 +1372,13 @@ impl ProofProvider for Client where .unwrap_or_default(); let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { - break; + break } total_size += size; entries.push((next_key.clone(), value)); current_key = next_key; } Ok(entries) - } fn verify_range_proof( @@ -1393,25 +1388,24 @@ impl ProofProvider for Client where start_key: &[u8], ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> { Ok(read_range_proof_check::>( - root, - proof, - None, - None, - None, - Some(start_key), + root, + proof, + None, + None, + None, + Some(start_key), )?) 
} } - impl BlockBuilderProvider for Client - where - B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, - Block: BlockT, - Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: ApiExt> - + BlockBuilderApi, +where + B: backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static, + Block: BlockT, + Self: ChainHeaderBackend + ProvideRuntimeApi, + >::Api: + ApiExt> + BlockBuilderApi, { fn new_block_at>( &self, @@ -1425,7 +1419,7 @@ impl BlockBuilderProvider for Client BlockBuilderProvider for Client ExecutorProvider for Client where +impl ExecutorProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1461,19 +1456,26 @@ impl ExecutorProvider for Client where } } -impl StorageProvider for Client where +impl StorageProvider for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); Ok(keys) } - fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) - -> sp_blockchain::Result> - { + fn storage_pairs( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { let state = self.state_at(id)?; let keys = state .keys(&key_prefix.0) @@ -1490,13 +1492,10 @@ impl StorageProvider for Client wher &self, id: &BlockId, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { let state = self.state_at(id)?; - let start_key = start_key - .or(prefix) - .map(|key| key.0.clone()) - .unwrap_or_else(Vec::new); + let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new(state, prefix, start_key)) } @@ -1505,13 +1504,10 @@ impl StorageProvider for Client wher id: &BlockId, child_info: ChildInfo, prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> + start_key: Option<&StorageKey>, ) -> sp_blockchain::Result> { let state = self.state_at(id)?; - let start_key = start_key - .or(prefix) - .map(|key| key.0.clone()) - .unwrap_or_else(Vec::new); + let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new); Ok(KeyIterator::new_child(state, child_info, prefix, start_key)) } @@ -1520,30 +1516,32 @@ impl StorageProvider for Client wher id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData) - ) + Ok(self + .state_at(id)? + .storage(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) } - fn storage_hash( &self, id: &BlockId, key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) + Ok(self + .state_at(id)? + .storage_hash(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) } fn child_storage_keys( &self, id: &BlockId, child_info: &ChildInfo, - key_prefix: &StorageKey + key_prefix: &StorageKey, ) -> sp_blockchain::Result> { - let keys = self.state_at(id)? + let keys = self + .state_at(id)? 
.child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) @@ -1555,9 +1553,10 @@ impl StorageProvider for Client wher &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? + Ok(self + .state_at(id)? .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) @@ -1567,12 +1566,12 @@ impl StorageProvider for Client wher &self, id: &BlockId, child_info: &ChildInfo, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? + Ok(self + .state_at(id)? .child_storage_hash(child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) } fn max_key_changes_range( @@ -1583,7 +1582,9 @@ impl StorageProvider for Client wher let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; if first > last_number { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + return Err(sp_blockchain::Error::ChangesTrieAccessFailed( + "Invalid changes trie range".into(), + )) } let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { @@ -1598,7 +1599,7 @@ impl StorageProvider for Client wher let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); Ok(Some((first, last))) }, - None => Ok(None) + None => Ok(None), } } @@ -1607,7 +1608,7 @@ impl StorageProvider for Client wher first: NumberFor, last: BlockId, storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey + key: &StorageKey, ) -> sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; @@ -1618,12 +1619,20 @@ impl StorageProvider for Client wher for (config_zero, config_end, config) in configs { let range_first = ::std::cmp::max(first, config_zero + One::one()); let range_anchor = match config_end { - Some((config_end_number, config_end_hash)) => if last_number > config_end_number { - ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } - } else { - ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } - }, - None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + Some((config_end_number, config_end_hash)) => + if last_number > config_end_number { + ChangesTrieAnchorBlockId { + hash: config_end_hash, + number: config_end_number, + } + } else { + ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + } + }, + None => + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, }; let config_range = ChangesTrieConfigurationRange { @@ -1638,9 +1647,10 @@ impl StorageProvider for Client wher &range_anchor, best_number, storage_key, - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + &key.0, + ) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; result.extend(result_range); } @@ -1648,14 +1658,18 @@ impl StorageProvider for Client wher } } -impl HeaderMetadata for Client where +impl HeaderMetadata for Client +where B: backend::Backend, E: 
CallExecutor, Block: BlockT, { type Error = sp_blockchain::Error; - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { self.backend.blockchain().header_metadata(hash) } @@ -1668,21 +1682,26 @@ impl HeaderMetadata for Client where } } -impl ProvideUncles for Client where +impl ProvideUncles for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, { - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { Ok(Client::uncles(self, target_hash, max_generation)? .into_iter() .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) - .collect() - ) + .collect()) } } -impl ChainHeaderBackend for Client where +impl ChainHeaderBackend for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1700,7 +1719,10 @@ impl ChainHeaderBackend for Client wher self.backend.blockchain().status(id) } - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { self.backend.blockchain().number(hash) } @@ -1709,7 +1731,8 @@ impl ChainHeaderBackend for Client wher } } -impl sp_runtime::traits::BlockIdTo for Client where +impl sp_runtime::traits::BlockIdTo for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1721,12 +1744,16 @@ impl sp_runtime::traits::BlockIdTo for Client) -> sp_blockchain::Result>> { + fn to_number( + &self, + block_id: &BlockId, + ) -> sp_blockchain::Result>> { self.block_number_from_id(block_id) } } -impl ChainHeaderBackend for &Client where +impl ChainHeaderBackend for &Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1744,7 +1771,10 @@ impl ChainHeaderBackend for &Client whe (**self).status(id) } - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { (**self).number(hash) } @@ -1753,7 +1783,8 @@ impl ChainHeaderBackend for &Client whe } } -impl ProvideCache for Client where +impl ProvideCache for Client +where B: backend::Backend, Block: BlockT, { @@ -1762,7 +1793,8 @@ impl ProvideCache for Client where } } -impl ProvideRuntimeApi for Client where +impl ProvideRuntimeApi for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1775,7 +1807,8 @@ impl ProvideRuntimeApi for Client where } } -impl CallApiAt for Client where +impl CallApiAt for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, @@ -1792,28 +1825,25 @@ impl CallApiAt for Client where ) -> Result, sp_api::ApiError> { let at = params.at; - let (manager, extensions) = self.execution_extensions.manager_and_extensions( - at, - params.context, - ); - - self.executor.contextual_call:: _, _, _>( - at, - params.function, - ¶ms.arguments, - params.overlayed_changes, - Some(params.storage_transaction_cache), - manager, - params.native_call, - params.recorder, - Some(extensions), - ).map_err(Into::into) + let (manager, extensions) = + self.execution_extensions.manager_and_extensions(at, params.context); + + self.executor + .contextual_call:: _, _, _>( + at, + params.function, + ¶ms.arguments, + params.overlayed_changes, 
+ Some(params.storage_transaction_cache), + manager, + params.native_call, + params.recorder, + Some(extensions), + ) + .map_err(Into::into) } - fn runtime_version_at( - &self, - at: &BlockId, - ) -> Result { + fn runtime_version_at(&self, at: &BlockId) -> Result { self.runtime_version_at(at).map_err(Into::into) } } @@ -1822,13 +1852,14 @@ impl CallApiAt for Client where /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. #[async_trait::async_trait] -impl sp_consensus::BlockImport for &Client where +impl sp_consensus::BlockImport for &Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi + - ApiExt, + as ProvideRuntimeApi>::Api: + CoreApi + ApiExt, RA: Sync + Send, backend::TransactionFor: Send + 'static, { @@ -1852,17 +1883,19 @@ impl sp_consensus::BlockImport for &Client return Ok(res), - PrepareStorageChangesResult::Import(storage_changes) => storage_changes, - }; + let storage_changes = + match self.prepare_block_storage_changes(&mut import_block).map_err(|e| { + warn!("Block prepare storage changes error:\n{:?}", e); + ConsensusError::ClientImport(e.to_string()) + })? { + PrepareStorageChangesResult::Discard(res) => return Ok(res), + PrepareStorageChangesResult::Import(storage_changes) => storage_changes, + }; self.lock_import_and_run(|operation| { self.apply_block(operation, import_block, new_cache, storage_changes) - }).map_err(|e| { + }) + .map_err(|e| { warn!("Block import error:\n{:?}", e); ConsensusError::ClientImport(e.to_string()).into() }) @@ -1873,18 +1906,15 @@ impl sp_consensus::BlockImport for &Client, ) -> Result { - let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block; + let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = + block; // Check the block against white and black lists if any are defined // (i.e. fork blocks and bad blocks respectively) match self.block_rules.lookup(number, &hash) { BlockLookupResult::KnownBad => { - trace!( - "Rejecting known bad block: #{} {:?}", - number, - hash, - ); - return Ok(ImportResult::KnownBad); + trace!("Rejecting known bad block: #{} {:?}", number, hash,); + return Ok(ImportResult::KnownBad) }, BlockLookupResult::Expected(expected_hash) => { trace!( @@ -1893,51 +1923,51 @@ impl sp_consensus::BlockImport for &Client {} + BlockLookupResult::NotSpecial => {}, } // Own status must be checked first. If the block and ancestry is pruned // this function must return `AlreadyInChain` rather than `MissingState` - match self.block_status(&BlockId::Hash(hash)) + match self + .block_status(&BlockId::Hash(hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
{ - BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => { - return Ok(ImportResult::AlreadyInChain) - }, + BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::InChainPruned if !import_existing => { - return Ok(ImportResult::AlreadyInChain) - }, + BlockStatus::InChainPruned if !import_existing => + return Ok(ImportResult::AlreadyInChain), BlockStatus::InChainPruned => {}, BlockStatus::Unknown => {}, BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), } - match self.block_status(&BlockId::Hash(parent_hash)) + match self + .block_status(&BlockId::Hash(parent_hash)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - { - BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), - BlockStatus::InChainPruned if allow_missing_state => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - + { + BlockStatus::InChainWithState | BlockStatus::Queued => {}, + BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), + BlockStatus::InChainPruned if allow_missing_state => {}, + BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } Ok(ImportResult::imported(false)) } } #[async_trait::async_trait] -impl sp_consensus::BlockImport for Client where +impl sp_consensus::BlockImport for Client +where B: backend::Backend, E: CallExecutor + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, + >::Api: + CoreApi + ApiExt, RA: Sync + Send, backend::TransactionFor: Send + 'static, { @@ -1960,7 +1990,8 @@ impl sp_consensus::BlockImport for Client Finalizer for Client where +impl Finalizer for Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -1995,8 +2026,8 @@ impl Finalizer for Client where } } - -impl Finalizer for &Client where +impl Finalizer for &Client +where B: backend::Backend, E: CallExecutor, Block: BlockT, @@ -2050,10 +2081,10 @@ where } impl BlockBackend for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { fn block_body( &self, @@ -2092,35 +2123,37 @@ impl BlockBackend for Client fn block_indexed_body( &self, - id: &BlockId + id: &BlockId, ) -> sp_blockchain::Result>>> { self.backend.blockchain().block_indexed_body(*id) } } impl backend::AuxStore for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Self: ProvideRuntimeApi, - >::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: CoreApi, { /// Insert auxiliary data into key-value store. fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { // Import is locked here because we may have other block import // operations that tries to set aux data. Note that for consensus // layer, one can always use atomic operations to make sure // import is only locked once. 
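The locking discipline described in this comment can be sketched with standard-library primitives (illustrative types, not the client's actual fields):

use std::sync::RwLock;

// Auxiliary writes take the same import lock as block import, so the two can
// never interleave.
struct AuxUnderImportLock {
    import_lock: RwLock<()>,
    aux: RwLock<Vec<(Vec<u8>, Option<Vec<u8>>)>>,
}

impl AuxUnderImportLock {
    fn insert_aux(&self, key: Vec<u8>, value: Option<Vec<u8>>) {
        // The same write lock that `lock_import_and_run` would take.
        let _import_guard = self.import_lock.write().expect("lock poisoned");
        self.aux.write().expect("lock poisoned").push((key, value));
    }
}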
- self.lock_import_and_run(|operation| { - apply_aux(operation, insert, delete) - }) + self.lock_import_and_run(|operation| apply_aux(operation, insert, delete)) } /// Query auxiliary data from key-value store. fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { @@ -2129,20 +2162,24 @@ impl backend::AuxStore for Client } impl backend::AuxStore for &Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: CoreApi, { fn insert_aux< 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { (**self).insert_aux(insert, delete) } @@ -2152,10 +2189,10 @@ impl backend::AuxStore for &Client } impl sp_consensus::block_validation::Chain for Client - where - BE: backend::Backend, - E: CallExecutor, - B: BlockT, +where + BE: backend::Backend, + E: CallExecutor, + B: BlockT, { fn block_status( &self, @@ -2174,8 +2211,10 @@ where fn block_indexed_body( &self, number: NumberFor, - ) ->Result>>, sp_transaction_storage_proof::Error> { - self.backend.blockchain().block_indexed_body(BlockId::number(number)) + ) -> Result>>, sp_transaction_storage_proof::Error> { + self.backend + .blockchain() + .block_indexed_body(BlockId::number(number)) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } @@ -2183,7 +2222,9 @@ where &self, hash: B::Hash, ) -> Result>, sp_transaction_storage_proof::Error> { - self.backend.blockchain().number(hash) + self.backend + .blockchain() + .number(hash) .map_err(|e| sp_transaction_storage_proof::Error::Application(Box::new(e))) } } diff --git a/substrate/client/service/src/client/genesis.rs b/substrate/client/service/src/client/genesis.rs index 08235f7efb6e3ba4b3bc974e8ab6c83fb291508b..e764e8e24f10516e3c1cad4495f22625a5d13755 100644 --- a/substrate/client/service/src/client/genesis.rs +++ b/substrate/client/service/src/client/genesis.rs @@ -18,17 +18,12 @@ //! Tool for creating the genesis block. -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}; /// Create a genesis block, given the initial storage. 
-pub fn construct_genesis_block< - Block: BlockT -> ( - state_root: Block::Hash -) -> Block { - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - Vec::new(), - ); +pub fn construct_genesis_block(state_root: Block::Hash) -> Block { + let extrinsics_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root(Vec::new()); Block::new( <::Header as HeaderT>::new( @@ -36,8 +31,8 @@ pub fn construct_genesis_block< extrinsics_root, state_root, Default::default(), - Default::default() + Default::default(), ), - Default::default() + Default::default(), ) } diff --git a/substrate/client/service/src/client/light.rs b/substrate/client/service/src/client/light.rs index 3a09bcbd78de52cfd5ec5c153c561442e0345290..82fe17e6855e9a259e55b2bba57bbf1f61e41001 100644 --- a/substrate/client/service/src/client/light.rs +++ b/substrate/client/service/src/client/light.rs @@ -20,15 +20,20 @@ use std::sync::Arc; +use prometheus_endpoint::Registry; use sc_executor::RuntimeInfo; -use sp_core::traits::{CodeExecutor, SpawnNamed}; use sc_telemetry::TelemetryHandle; -use sp_runtime::BuildStorage; -use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_blockchain::Result as ClientResult; -use prometheus_endpoint::Registry; +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sp_runtime::{ + traits::{Block as BlockT, HashFor}, + BuildStorage, +}; -use super::{call_executor::LocalCallExecutor, client::{Client, ClientConfig}}; +use super::{ + call_executor::LocalCallExecutor, + client::{Client, ClientConfig}, +}; use sc_client_api::light::Storage as BlockchainStorage; use sc_light::{Backend, GenesisCallExecutor}; @@ -41,26 +46,26 @@ pub fn new_light( prometheus_registry: Option, telemetry: Option, ) -> ClientResult< - Client< + Client< + Backend>, + GenesisCallExecutor< Backend>, - GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E> - >, - B, - RA - > - > - where - B: BlockT, - S: BlockchainStorage + 'static, - E: CodeExecutor + RuntimeInfo + Clone + 'static, + LocalCallExecutor>, E>, + >, + B, + RA, + >, +> +where + B: BlockT, + S: BlockchainStorage + 'static, + E: CodeExecutor + RuntimeInfo + Clone + 'static, { let local_executor = LocalCallExecutor::new( backend.clone(), code_executor, spawn_handle.clone(), - ClientConfig::default() + ClientConfig::default(), )?; let executor = GenesisCallExecutor::new(backend.clone(), local_executor); Client::new( diff --git a/substrate/client/service/src/client/mod.rs b/substrate/client/service/src/client/mod.rs index dd0b70b551bf4187d6367de9c30b61482982443f..754309e864ebd1563cece6306cf0bc54f64d4993 100644 --- a/substrate/client/service/src/client/mod.rs +++ b/substrate/client/service/src/client/mod.rs @@ -45,11 +45,11 @@ //! the ways in which the runtime can interface with the outside. Any code that builds a `Client` //! is responsible for putting the right marker. 
-pub mod genesis; -pub mod light; +mod block_rules; mod call_executor; mod client; -mod block_rules; +pub mod genesis; +pub mod light; mod wasm_override; mod wasm_substitutes; @@ -58,5 +58,5 @@ pub use self::{ client::{Client, ClientConfig}, }; -#[cfg(feature="test-helpers")] -pub use self::client::{new_with_backend, new_in_mem}; +#[cfg(feature = "test-helpers")] +pub use self::client::{new_in_mem, new_with_backend}; diff --git a/substrate/client/service/src/client/wasm_override.rs b/substrate/client/service/src/client/wasm_override.rs index 06a719c346ca66ac9312b1deb10a3360d181755a..7abd04f2be2366e81cb793bdf1e90e22f3917338 100644 --- a/substrate/client/service/src/client/wasm_override.rs +++ b/substrate/client/service/src/client/wasm_override.rs @@ -35,18 +35,17 @@ //! A custom WASM blob will override on-chain WASM if the spec version matches. If it is //! required to override multiple runtimes, multiple WASM blobs matching each of the spec versions //! needed must be provided in the given directory. -//! +use sc_executor::RuntimeInfo; +use sp_blockchain::Result; +use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_state_machine::BasicExternalities; +use sp_version::RuntimeVersion; use std::{ - fs, collections::{HashMap, hash_map::DefaultHasher}, - path::{Path, PathBuf}, + collections::{hash_map::DefaultHasher, HashMap}, + fs, hash::Hasher as _, + path::{Path, PathBuf}, }; -use sp_core::traits::FetchRuntimeCode; -use sp_state_machine::BasicExternalities; -use sp_blockchain::Result; -use sc_executor::RuntimeInfo; -use sp_version::RuntimeVersion; -use sp_core::traits::RuntimeCode; #[derive(Clone, Debug, PartialEq)] /// Auxiliary structure that holds a wasm blob and its hash. @@ -62,11 +61,7 @@ impl WasmBlob { } fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { - RuntimeCode { - code_fetcher: self, - hash: self.hash.clone(), - heap_pages, - } + RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } } } @@ -117,7 +112,7 @@ pub struct WasmOverride { impl WasmOverride where - E: RuntimeInfo + Clone + 'static + E: RuntimeInfo + Clone + 'static, { pub fn new

(path: P, executor: E) -> Result where @@ -130,26 +125,19 @@ where /// Gets an override by it's runtime spec version. /// /// Returns `None` if an override for a spec version does not exist. - pub fn get<'a, 'b: 'a>( - &'b self, - spec: &u32, - pages: Option, - ) -> Option> { - self.overrides - .get(spec) - .map(|w| w.runtime_code(pages)) + pub fn get<'a, 'b: 'a>(&'b self, spec: &u32, pages: Option) -> Option> { + self.overrides.get(spec).map(|w| w.runtime_code(pages)) } /// Scrapes a folder for WASM runtimes. /// Returns a hashmap of the runtime version and wasm runtime code. fn scrape_overrides(dir: &Path, executor: &E) -> Result> { - - let handle_err = |e: std::io::Error | -> sp_blockchain::Error { + let handle_err = |e: std::io::Error| -> sp_blockchain::Error { WasmOverrideError::Io(dir.to_owned(), e).into() }; if !dir.is_dir() { - return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()); + return Err(WasmOverrideError::NotADirectory(dir.to_owned()).into()) } let mut overrides = HashMap::new(); @@ -176,13 +164,13 @@ where ); duplicates.push(format!("{}", path.display())); } - } - _ => () + }, + _ => (), } } if !duplicates.is_empty() { - return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()); + return Err(WasmOverrideError::DuplicateRuntime(duplicates).into()) } Ok(overrides) @@ -194,7 +182,8 @@ where heap_pages: Option, ) -> Result { let mut ext = BasicExternalities::default(); - executor.runtime_version(&mut ext, &code.runtime_code(heap_pages)) + executor + .runtime_version(&mut ext, &code.runtime_code(heap_pages)) .map_err(|e| WasmOverrideError::VersionInvalid(format!("{:?}", e)).into()) } } @@ -203,28 +192,25 @@ where #[cfg(test)] pub fn dummy_overrides(executor: &E) -> WasmOverride where - E: RuntimeInfo + Clone + 'static + E: RuntimeInfo + Clone + 'static, { let mut overrides = HashMap::new(); overrides.insert(0, WasmBlob::new(vec![0, 0, 0, 0, 0, 0, 0, 0])); overrides.insert(1, WasmBlob::new(vec![1, 1, 1, 1, 1, 1, 1, 1])); overrides.insert(2, WasmBlob::new(vec![2, 2, 2, 2, 2, 2, 2, 2])); - WasmOverride { - overrides, - executor: executor.clone() - } + WasmOverride { overrides, executor: executor.clone() } } #[cfg(test)] mod tests { use super::*; use sc_executor::{NativeExecutor, WasmExecutionMethod}; - use substrate_test_runtime_client::LocalExecutor; use std::fs::{self, File}; + use substrate_test_runtime_client::LocalExecutor; fn wasm_test(fun: F) where - F: Fn(&Path, &[u8], &NativeExecutor::) + F: Fn(&Path, &[u8], &NativeExecutor), { let exec = NativeExecutor::::new( WasmExecutionMethod::Interpreted, @@ -252,8 +238,8 @@ mod tests { fn should_scrape_wasm() { wasm_test(|dir, wasm_bytes, exec| { fs::write(dir.join("test.wasm"), wasm_bytes).expect("Create test file"); - let overrides = WasmOverride::scrape_overrides(dir, exec) - .expect("HashMap of u32 and WasmBlob"); + let overrides = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); let wasm = overrides.get(&2).expect("WASM binary"); assert_eq!(wasm.code, substrate_test_runtime::wasm_binary_unwrap().to_vec()) }); @@ -272,10 +258,10 @@ mod tests { Some(WasmOverrideError::DuplicateRuntime(duplicates)) => { assert_eq!(duplicates.len(), 1); }, - _ => panic!("Test should end with Msg Error Variant") + _ => panic!("Test should end with Msg Error Variant"), } }, - _ => panic!("Test should end in error") + _ => panic!("Test should end in error"), } }); } @@ -286,8 +272,8 @@ mod tests { File::create(dir.join("README.md")).expect("Create test file"); 
File::create(dir.join("LICENSE")).expect("Create a test file"); fs::write(dir.join("test0.wasm"), wasm_bytes).expect("Create test file"); - let scraped = WasmOverride::scrape_overrides(dir, exec) - .expect("HashMap of u32 and WasmBlob"); + let scraped = + WasmOverride::scrape_overrides(dir, exec).expect("HashMap of u32 and WasmBlob"); assert_eq!(scraped.len(), 1); }); } diff --git a/substrate/client/service/src/client/wasm_substitutes.rs b/substrate/client/service/src/client/wasm_substitutes.rs index e947e4566f33217ada05e2d4965c1bc3d2b3d6b8..ac48059fc2f37e11159d88724bc623fe96683536 100644 --- a/substrate/client/service/src/client/wasm_substitutes.rs +++ b/substrate/client/service/src/client/wasm_substitutes.rs @@ -18,15 +18,22 @@ //! # WASM substitutes -use std::{collections::{HashMap, hash_map::DefaultHasher}, hash::Hasher as _, sync::Arc}; +use parking_lot::RwLock; +use sc_client_api::backend; +use sc_executor::RuntimeInfo; +use sp_blockchain::{HeaderBackend, Result}; use sp_core::traits::{FetchRuntimeCode, RuntimeCode}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; use sp_state_machine::BasicExternalities; -use sp_blockchain::{Result, HeaderBackend}; -use sc_executor::RuntimeInfo; use sp_version::RuntimeVersion; -use sc_client_api::backend; -use sp_runtime::{traits::{NumberFor, Block as BlockT}, generic::BlockId}; -use parking_lot::RwLock; +use std::{ + collections::{hash_map::DefaultHasher, HashMap}, + hash::Hasher as _, + sync::Arc, +}; /// A wasm substitute for the on chain wasm. #[derive(Debug)] @@ -51,11 +58,7 @@ impl WasmSubstitute { } fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { - RuntimeCode { - code_fetcher: self, - hash: self.hash.clone(), - heap_pages, - } + RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } } /// Returns `true` when the substitute matches for the given `block_id`. @@ -82,7 +85,8 @@ impl WasmSubstitute { block_number }; - let requested_block_number = backend.blockchain().block_number_from_id(&block_id).ok().flatten(); + let requested_block_number = + backend.blockchain().block_number_from_id(&block_id).ok().flatten(); Some(block_number) <= requested_block_number } @@ -145,11 +149,14 @@ where executor: Executor, backend: Arc, ) -> Result { - let substitutes = substitutes.into_iter().map(|(parent_block_hash, code)| { - let substitute = WasmSubstitute::new(code, parent_block_hash, &*backend)?; - let version = Self::runtime_version(&executor, &substitute)?; - Ok((version.spec_version, substitute)) - }).collect::>>()?; + let substitutes = substitutes + .into_iter() + .map(|(parent_block_hash, code)| { + let substitute = WasmSubstitute::new(code, parent_block_hash, &*backend)?; + let version = Self::runtime_version(&executor, &substitute)?; + Ok((version.spec_version, substitute)) + }) + .collect::>>()?; Ok(Self { executor, substitutes: Arc::new(substitutes), backend }) } @@ -172,8 +179,8 @@ where code: &WasmSubstitute, ) -> Result { let mut ext = BasicExternalities::default(); - executor.runtime_version(&mut ext, &code.runtime_code(None)) + executor + .runtime_version(&mut ext, &code.runtime_code(None)) .map_err(|e| WasmSubstituteError::VersionInvalid(format!("{:?}", e)).into()) } } - diff --git a/substrate/client/service/src/config.rs b/substrate/client/service/src/config.rs index be14b4e322e765a8ec6cf837bd8e6f0c80d262eb..c915978f5384e5d7e15a9940d7af17b89998e9b1 100644 --- a/substrate/client/service/src/config.rs +++ b/substrate/client/service/src/config.rs @@ -18,25 +18,34 @@ //! 
Service configuration. +pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; pub use sc_client_db::{ - Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig, - KeepBlocks, TransactionStorageMode -}; -pub use sc_network::Multiaddr; -pub use sc_network::config::{ - ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig, - SetConfig, NonDefaultSetConfig, TransportConfig, - RequestResponseConfig, IncomingRequest, OutgoingResponse, + Database, DatabaseSettingsSrc as DatabaseConfig, KeepBlocks, PruningMode, + TransactionStorageMode, }; pub use sc_executor::WasmExecutionMethod; -pub use sc_client_api::execution_extensions::{ExecutionStrategies, ExecutionStrategy}; +pub use sc_network::{ + config::{ + ExtTransport, IncomingRequest, MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, + NonDefaultSetConfig, OutgoingResponse, RequestResponseConfig, Role, SetConfig, + TransportConfig, + }, + Multiaddr, +}; -use std::{io, future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; -pub use sc_transaction_pool::Options as TransactionPoolOptions; +use prometheus_endpoint::Registry; use sc_chain_spec::ChainSpec; -use sp_core::crypto::SecretString; pub use sc_telemetry::TelemetryEndpoints; -use prometheus_endpoint::Registry; +pub use sc_transaction_pool::Options as TransactionPoolOptions; +use sp_core::crypto::SecretString; +use std::{ + future::Future, + io, + net::SocketAddr, + path::{Path, PathBuf}, + pin::Pin, + sync::Arc, +}; #[cfg(not(target_os = "unknown"))] use tempfile::TempDir; @@ -153,7 +162,7 @@ pub enum KeystoreConfig { /// The path of the keystore. path: PathBuf, /// Node keystore's password. - password: Option + password: Option, }, /// In-memory keystore. Recommended for in-browser nodes. InMemory, @@ -194,7 +203,7 @@ impl PrometheusConfig { Self { port, registry: Registry::new_custom(Some("substrate".into()), None) - .expect("this can only fail if the prefix is empty") + .expect("this can only fail if the prefix is empty"), } } } @@ -215,11 +224,13 @@ impl Configuration { let protocol_id_full = match self.chain_spec.protocol_id() { Some(pid) => pid, None => { - log::warn!("Using default protocol ID {:?} because none is configured in the \ - chain specs", crate::DEFAULT_PROTOCOL_ID + log::warn!( + "Using default protocol ID {:?} because none is configured in the \ + chain specs", + crate::DEFAULT_PROTOCOL_ID ); crate::DEFAULT_PROTOCOL_ID - } + }, }; sc_network::config::ProtocolId::from(protocol_id_full) } @@ -261,9 +272,7 @@ impl BasePath { /// instance is dropped. #[cfg(not(target_os = "unknown"))] pub fn new_temp_dir() -> io::Result { - Ok(BasePath::Temporary( - tempfile::Builder::new().prefix("substrate").tempdir()?, - )) + Ok(BasePath::Temporary(tempfile::Builder::new().prefix("substrate").tempdir()?)) } /// Create a `BasePath` instance based on an existing path on disk. diff --git a/substrate/client/service/src/error.rs b/substrate/client/service/src/error.rs index 9c653219ca130f1b68f1bf637f52315a363cc84a..1acd33ead6777475915fa10575adc2925e24648c 100644 --- a/substrate/client/service/src/error.rs +++ b/substrate/client/service/src/error.rs @@ -18,10 +18,10 @@ //! Errors that can occur during the service operation. -use sc_network; use sc_keystore; -use sp_consensus; +use sc_network; use sp_blockchain; +use sp_consensus; /// Service Result typedef. 
pub type Result = std::result::Result; diff --git a/substrate/client/service/src/lib.rs b/substrate/client/service/src/lib.rs index 40cb1aeea6a9e46ae7491d071b880c41b000a0de..5d7c490db6abfb7d6cff665c68580c00bb1ecfe2 100644 --- a/substrate/client/service/src/lib.rs +++ b/substrate/client/service/src/lib.rs @@ -22,65 +22,62 @@ #![warn(missing_docs)] #![recursion_limit = "1024"] -pub mod config; pub mod chain_ops; +pub mod config; pub mod error; -mod metrics; mod builder; #[cfg(feature = "test-helpers")] pub mod client; #[cfg(not(feature = "test-helpers"))] mod client; +mod metrics; mod task_manager; -use std::{io, pin::Pin}; -use std::net::SocketAddr; -use std::collections::HashMap; -use std::task::Poll; +use std::{collections::HashMap, io, net::SocketAddr, pin::Pin, task::Poll}; -use futures::{Future, FutureExt, Stream, StreamExt, stream, compat::*}; -use sc_network::PeerId; -use log::{warn, debug, error}; -use codec::{Encode, Decode}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use codec::{Decode, Encode}; +use futures::{compat::*, stream, Future, FutureExt, Stream, StreamExt}; +use log::{debug, error, warn}; use parity_util_mem::MallocSizeOf; +use sc_network::PeerId; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; use sp_utils::mpsc::TracingUnboundedReceiver; -pub use self::error::Error; -pub use self::builder::{ - new_full_client, new_db_backend, new_client, new_full_parts, new_light_parts, - spawn_tasks, build_network, build_offchain_workers, - BuildNetworkParams, KeystoreContainer, NetworkStarter, SpawnTasksParams, TFullClient, TLightClient, - TFullBackend, TLightBackend, TLightBackendWithHash, TLightClientWithBackend, - TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, +pub use self::{ + builder::{ + build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, + new_full_parts, new_light_parts, spawn_tasks, BuildNetworkParams, KeystoreContainer, + NetworkStarter, NoopRpcExtensionBuilder, RpcExtensionBuilder, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, TLightBackend, TLightBackendWithHash, + TLightCallExecutor, TLightClient, TLightClientWithBackend, + }, + client::{ClientConfig, LocalCallExecutor}, + error::Error, }; pub use config::{ - BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, - KeepBlocks, TransactionStorageMode, + BasePath, Configuration, DatabaseConfig, KeepBlocks, PruningMode, Role, RpcMethods, + TaskExecutor, TaskType, TransactionStorageMode, }; pub use sc_chain_spec::{ - ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, - NoExtension, ChainType, + ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, + Properties, RuntimeGenesis, }; -pub use sc_transaction_pool_api::{TransactionPool, InPoolTransaction, error::IntoPoolError}; -pub use sc_transaction_pool::Options as TransactionPoolOptions; -pub use sc_rpc::Metadata as RpcMetadata; +use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use std::{ops::Deref, result::Result, sync::Arc}; -#[doc(hidden)] -pub use sc_network::config::{ - OnDemand, TransactionImport, - TransactionImportFuture, -}; +pub use sc_network::config::{OnDemand, TransactionImport, TransactionImportFuture}; +pub use sc_rpc::Metadata as RpcMetadata; pub use sc_tracing::TracingReceiver; -pub 
use task_manager::SpawnTaskHandle; -pub use task_manager::TaskManager; +pub use sc_transaction_pool::Options as TransactionPoolOptions; +pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; pub use sp_consensus::import_queue::ImportQueue; -pub use self::client::{LocalCallExecutor, ClientConfig}; -use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; +#[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; +pub use task_manager::{SpawnTaskHandle, TaskManager}; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -96,7 +93,9 @@ impl MallocSizeOfWasm for T {} /// RPC handlers that can perform RPC queries. #[derive(Clone)] -pub struct RpcHandlers(Arc>); +pub struct RpcHandlers( + Arc>, +); impl RpcHandlers { /// Starts an RPC query. @@ -108,17 +107,22 @@ impl RpcHandlers { /// /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to /// send back spontaneous events. - pub fn rpc_query(&self, mem: &RpcSession, request: &str) - -> Pin> + Send>> { - self.0.handle_request(request, mem.metadata.clone()) + pub fn rpc_query( + &self, + mem: &RpcSession, + request: &str, + ) -> Pin> + Send>> { + self.0 + .handle_request(request, mem.metadata.clone()) .compat() .map(|res| res.expect("this should never fail")) .boxed() } /// Provides access to the underlying `MetaIoHandler` - pub fn io_handler(&self) - -> Arc> { + pub fn io_handler( + &self, + ) -> Arc> { self.0.clone() } } @@ -149,8 +153,8 @@ pub struct PartialComponents + HeaderBackend, - H: sc_network::ExHashT -> ( + H: sc_network::ExHashT, +>( role: Role, mut network: sc_network::NetworkWorker, client: Arc, @@ -171,7 +175,9 @@ async fn build_network_future< // ready. This way, we only get the latest finalized block. stream::poll_fn(move |cx| { let mut last = None; - while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { + while let Poll::Ready(Some(item)) = + Pin::new(&mut finality_notification_stream).poll_next(cx) + { last = Some(item); } if let Some(last) = last { @@ -179,11 +185,12 @@ async fn build_network_future< } else { Poll::Pending } - }).fuse() + }) + .fuse() }; loop { - futures::select!{ + futures::select! { // List of blocks that the client has imported. notification = imported_blocks_stream.next() => { let notification = match notification { @@ -338,79 +345,90 @@ mod waiting { /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. #[cfg(not(target_os = "unknown"))] fn start_rpc_servers< - H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) - -> sc_rpc_server::RpcHandler + H: FnMut( + sc_rpc::DenyUnsafe, + sc_rpc_server::RpcMiddleware, + ) -> sc_rpc_server::RpcHandler, >( config: &Configuration, mut gen_handler: H, rpc_metrics: sc_rpc_server::RpcMetrics, ) -> Result, error::Error> { - fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> - where F: FnMut(&SocketAddr) -> Result, - { - address.map(|mut address| start(&address) - .or_else(|e| match e.kind() { - io::ErrorKind::AddrInUse | - io::ErrorKind::PermissionDenied => { + fn maybe_start_server( + address: Option, + mut start: F, + ) -> Result, io::Error> + where + F: FnMut(&SocketAddr) -> Result, + { + address + .map(|mut address| { + start(&address).or_else(|e| match e.kind() { + io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { warn!("Unable to bind RPC server to {}. 
Trying random port.", address); address.set_port(0); start(&address) }, _ => Err(e), - } - ) ).transpose() - } + }) + }) + .transpose() + } fn deny_unsafe(addr: &SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { let is_exposed_addr = !addr.ip().is_loopback(); match (is_exposed_addr, methods) { - | (_, RpcMethods::Unsafe) - | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, - _ => sc_rpc::DenyUnsafe::Yes + | (_, RpcMethods::Unsafe) | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, + _ => sc_rpc::DenyUnsafe::Yes, } } Ok(Box::new(( - config.rpc_ipc.as_ref().map(|path| sc_rpc_server::start_ipc( - &*path, gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ipc") + config.rpc_ipc.as_ref().map(|path| { + sc_rpc_server::start_ipc( + &*path, + gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ipc"), + ), ) - )), - maybe_start_server( - config.rpc_http, - |address| sc_rpc_server::start_http( + }), + maybe_start_server(config.rpc_http, |address| { + sc_rpc_server::start_http( address, config.rpc_http_threads, config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "http"), ), - config.rpc_max_payload - ), - )?.map(|s| waiting::HttpServer(Some(s))), - maybe_start_server( - config.rpc_ws, - |address| sc_rpc_server::start_ws( + config.rpc_max_payload, + ) + })? + .map(|s| waiting::HttpServer(Some(s))), + maybe_start_server(config.rpc_ws, |address| { + sc_rpc_server::start_ws( address, config.rpc_ws_max_connections, config.rpc_cors.as_ref(), gen_handler( deny_unsafe(&address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws") + sc_rpc_server::RpcMiddleware::new(rpc_metrics.clone(), "ws"), ), - config.rpc_max_payload - ), - )?.map(|s| waiting::WsServer(Some(s))), + config.rpc_max_payload, + ) + })? + .map(|s| waiting::WsServer(Some(s))), ))) } /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. #[cfg(target_os = "unknown")] fn start_rpc_servers< - H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) - -> sc_rpc_server::RpcHandler + H: FnMut( + sc_rpc::DenyUnsafe, + sc_rpc_server::RpcMiddleware, + ) -> sc_rpc_server::RpcHandler, >( _: &Configuration, _: H, @@ -434,9 +452,7 @@ impl RpcSession { /// /// The `RpcSession` must be kept alive in order to receive messages on the sender. pub fn new(sender: futures01::sync::mpsc::Sender) -> RpcSession { - RpcSession { - metadata: sender.into(), - } + RpcSession { metadata: sender.into() } } } @@ -450,10 +466,9 @@ pub struct TransactionPoolAdapter { /// Get transactions for propagation. /// /// Function extracted to simplify the test and prevent creating `ServiceFactory`. 
-fn transactions_to_propagate(pool: &Pool) - -> Vec<(H, B::Extrinsic)> +fn transactions_to_propagate(pool: &Pool) -> Vec<(H, B::Extrinsic)> where - Pool: TransactionPool, + Pool: TransactionPool, B: BlockT, H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, E: IntoPoolError + From, @@ -468,11 +483,10 @@ where .collect() } -impl sc_network::config::TransactionPool for - TransactionPoolAdapter +impl sc_network::config::TransactionPool for TransactionPoolAdapter where C: sc_network::config::Client + Send + Sync, - Pool: 'static + TransactionPool, + Pool: 'static + TransactionPool, B: BlockT, H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, E: 'static + IntoPoolError + From, @@ -485,10 +499,7 @@ where self.pool.hash_of(transaction) } - fn import( - &self, - transaction: B::Extrinsic, - ) -> TransactionImportFuture { + fn import(&self, transaction: B::Extrinsic) -> TransactionImportFuture { if !self.imports_external_transactions { debug!("Transaction rejected"); Box::pin(futures::future::ready(TransactionImport::None)); @@ -499,28 +510,33 @@ where Ok(uxt) => uxt, Err(e) => { debug!("Transaction invalid: {:?}", e); - return Box::pin(futures::future::ready(TransactionImport::Bad)); - } + return Box::pin(futures::future::ready(TransactionImport::Bad)) + }, }; let best_block_id = BlockId::hash(self.client.info().best_hash); - let import_future = self.pool.submit_one(&best_block_id, sc_transaction_pool_api::TransactionSource::External, uxt); + let import_future = self.pool.submit_one( + &best_block_id, + sc_transaction_pool_api::TransactionSource::External, + uxt, + ); Box::pin(async move { match import_future.await { Ok(_) => TransactionImport::NewGood, Err(e) => match e.into_pool_error() { - Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => TransactionImport::KnownGood, + Ok(sc_transaction_pool_api::error::Error::AlreadyImported(_)) => + TransactionImport::KnownGood, Ok(e) => { debug!("Error adding transaction to the pool: {:?}", e); TransactionImport::Bad - } + }, Err(e) => { debug!("Error converting pool error: {:?}", e); // it is not bad at least, just some internal node logic error, so peer is innocent. TransactionImport::KnownGood - } - } + }, + }, } }) } @@ -530,11 +546,10 @@ where } fn transaction(&self, hash: &H) -> Option { - self.pool.ready_transaction(hash) - .and_then( - // Only propagable transactions should be resolved for network service. - |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None } - ) + self.pool.ready_transaction(hash).and_then( + // Only propagable transactions should be resolved for network service. 
+ |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None }, + ) } } @@ -542,10 +557,13 @@ mod tests { use super::*; use futures::executor::block_on; + use sc_transaction_pool::BasicPool; use sp_consensus::SelectChain; use sp_runtime::traits::BlindCheckable; - use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; - use sc_transaction_pool::BasicPool; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + }; #[test] fn should_not_propagate_transactions_that_are_marked_as_such() { @@ -553,13 +571,8 @@ mod tests { let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let source = sp_runtime::transaction_validity::TransactionSource::External; let best = block_on(longest_chain.best_chain()).unwrap(); let transaction = Transfer { @@ -569,12 +582,14 @@ mod tests { to: Default::default(), } .into_signed_tx(); + block_on(pool.submit_one(&BlockId::hash(best.hash()), source, transaction.clone())) + .unwrap(); block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, transaction.clone()), - ).unwrap(); - block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, Extrinsic::IncludeData(vec![1])), - ).unwrap(); + &BlockId::hash(best.hash()), + source, + Extrinsic::IncludeData(vec![1]), + )) + .unwrap(); assert_eq!(pool.status().ready, 2); // when diff --git a/substrate/client/service/src/metrics.rs b/substrate/client/service/src/metrics.rs index 7c74b327ea2600135bba898c8b39789a97207f03..cd03916c9261b9302d6f6f05a173da2d95d29d56 100644 --- a/substrate/client/service/src/metrics.rs +++ b/substrate/client/service/src/metrics.rs @@ -20,16 +20,15 @@ use std::{convert::TryFrom, time::SystemTime}; use crate::config::Configuration; use futures_timer::Delay; -use prometheus_endpoint::{register, Gauge, U64, Registry, PrometheusError, Opts, GaugeVec}; +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; +use sc_client_api::{ClientInfo, UsageProvider}; +use sc_network::{config::Role, NetworkService, NetworkStatus}; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; +use sc_transaction_pool_api::{MaintainedTransactionPool, PoolStatus}; use sp_api::ProvideRuntimeApi; -use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; -use sc_transaction_pool_api::{PoolStatus, MaintainedTransactionPool}; +use sp_runtime::traits::{Block, NumberFor, SaturatedConversion, UniqueSaturatedInto}; use sp_utils::metrics::register_globals; -use sc_client_api::{ClientInfo, UsageProvider}; -use sc_network::{config::Role, NetworkStatus, NetworkService}; -use std::sync::Arc; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use wasm_timer::Instant; struct PrometheusMetrics { @@ -51,54 +50,74 @@ impl PrometheusMetrics { version: &str, roles: u64, ) -> Result { - register(Gauge::::with_opts( - Opts::new( - "build_info", - "A metric with a constant '1' value labeled by name, version" - ) + register( + Gauge::::with_opts( + Opts::new( + "build_info", + "A metric with a constant '1' value labeled by name, version", + ) .const_label("name", name) - .const_label("version", version) - )?, &registry)?.set(1); + .const_label("version", version), + )?, 
+ &registry, + )? + .set(1); - register(Gauge::::new( - "node_roles", "The roles the node is running as", - )?, &registry)?.set(roles); + register(Gauge::::new("node_roles", "The roles the node is running as")?, &registry)? + .set(roles); register_globals(registry)?; - let start_time_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default(); - register(Gauge::::new( - "process_start_time_seconds", - "Number of seconds between the UNIX epoch and the moment the process started", - )?, registry)?.set(start_time_since_epoch.as_secs()); + let start_time_since_epoch = + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default(); + register( + Gauge::::new( + "process_start_time_seconds", + "Number of seconds between the UNIX epoch and the moment the process started", + )?, + registry, + )? + .set(start_time_since_epoch.as_secs()); Ok(Self { // generic internals - block_height: register(GaugeVec::new( - Opts::new("block_height", "Block height info of the chain"), - &["status"] - )?, registry)?, - - number_leaves: register(Gauge::new( - "number_leaves", "Number of known chain leaves (aka forks)", - )?, registry)?, - - ready_transactions_number: register(Gauge::new( - "ready_transactions_number", "Number of transactions in the ready queue", - )?, registry)?, + block_height: register( + GaugeVec::new( + Opts::new("block_height", "Block height info of the chain"), + &["status"], + )?, + registry, + )?, + + number_leaves: register( + Gauge::new("number_leaves", "Number of known chain leaves (aka forks)")?, + registry, + )?, + + ready_transactions_number: register( + Gauge::new( + "ready_transactions_number", + "Number of transactions in the ready queue", + )?, + registry, + )?, // I/ O - database_cache: register(Gauge::new( - "database_cache_bytes", "RocksDB cache size in bytes", - )?, registry)?, - state_cache: register(Gauge::new( - "state_cache_bytes", "State cache size in bytes", - )?, registry)?, - state_db: register(GaugeVec::new( - Opts::new("state_db_cache_bytes", "State DB cache in bytes"), - &["subtype"] - )?, registry)?, + database_cache: register( + Gauge::new("database_cache_bytes", "RocksDB cache size in bytes")?, + registry, + )?, + state_cache: register( + Gauge::new("state_cache_bytes", "State cache size in bytes")?, + registry, + )?, + state_db: register( + GaugeVec::new( + Opts::new("state_db_cache_bytes", "State DB cache in bytes"), + &["subtype"], + )?, + registry, + )?, }) } } @@ -179,11 +198,7 @@ impl MetricsService { let net_status = network.status().await.ok(); // Update / Send the metrics. - self.update( - &client.usage_info(), - &transactions.status(), - net_status, - ); + self.update(&client.usage_info(), &transactions.status(), net_status); // Schedule next tick. 
timer.reset(timer_interval); @@ -220,14 +235,8 @@ impl MetricsService { ); if let Some(metrics) = self.metrics.as_ref() { - metrics - .block_height - .with_label_values(&["finalized"]) - .set(finalized_number); - metrics - .block_height - .with_label_values(&["best"]) - .set(best_number); + metrics.block_height.with_label_values(&["finalized"]).set(finalized_number); + metrics.block_height.with_label_values(&["best"]).set(best_number); if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { metrics.number_leaves.set(leaves); @@ -239,15 +248,17 @@ impl MetricsService { metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); - metrics.state_db.with_label_values(&["non_canonical"]).set( - info.memory.state_db.non_canonical.as_bytes() as u64, - ); + metrics + .state_db + .with_label_values(&["non_canonical"]) + .set(info.memory.state_db.non_canonical.as_bytes() as u64); if let Some(pruning) = info.memory.state_db.pruning { metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); } - metrics.state_db.with_label_values(&["pinned"]).set( - info.memory.state_db.pinned.as_bytes() as u64, - ); + metrics + .state_db + .with_label_values(&["pinned"]) + .set(info.memory.state_db.pinned.as_bytes() as u64); } } @@ -259,14 +270,13 @@ impl MetricsService { let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; - let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = - if elapsed > 0 { - self.last_total_bytes_inbound = total_bytes_inbound; - self.last_total_bytes_outbound = total_bytes_outbound; - (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) - } else { - (diff_bytes_inbound, diff_bytes_outbound) - }; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; telemetry!( self.telemetry; @@ -278,9 +288,10 @@ impl MetricsService { ); if let Some(metrics) = self.metrics.as_ref() { - let best_seen_block: Option = net_status - .best_seen_block - .map(|num: NumberFor| UniqueSaturatedInto::::unique_saturated_into(num)); + let best_seen_block: Option = + net_status.best_seen_block.map(|num: NumberFor| { + UniqueSaturatedInto::::unique_saturated_into(num) + }); if let Some(best_seen_block) = best_seen_block { metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); diff --git a/substrate/client/service/src/task_manager/mod.rs b/substrate/client/service/src/task_manager/mod.rs index c7254f1f894de9e63dcdeba0c37bc3cde9f2b8db..d759798f744b685fff187f222e1e19865037f1fa 100644 --- a/substrate/client/service/src/task_manager/mod.rs +++ b/substrate/client/service/src/task_manager/mod.rs @@ -18,22 +18,24 @@ //! Substrate service tasks management module. 
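A note on the task-manager hunks that follow: `SpawnTaskHandle::spawn_inner` races every spawned task against the service-wide exit signal, so a task ends either because it finished or because shutdown was requested. A minimal, self-contained sketch of that select-on-exit pattern, built on the same `futures` and `exit-future` crates this module imports (`run_until_exit` is an illustrative name, not part of the sc-service API):

use futures::future::{select, Either};

async fn run_until_exit(
    on_exit: exit_future::Exit,
    task: impl std::future::Future<Output = ()> + Send + 'static,
) {
    futures::pin_mut!(task);
    // Whichever side finishes first wins; the loser is dropped, which is how
    // a still-running task gets cancelled once the exit signal fires.
    match select(on_exit, task).await {
        Either::Left(((), _)) => println!("exit signalled; task interrupted"),
        Either::Right(((), _)) => println!("task finished"),
    }
}

fn main() {
    let (signal, exit) = exit_future::signal();
    futures::executor::block_on(run_until_exit(exit, async { /* task body */ }));
    drop(signal); // firing or dropping the signal resolves every `Exit` clone
}

The real implementation additionally wraps the task in `catch_unwind` and Prometheus poll-duration instrumentation, but the control flow is the select above.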
-use std::{panic, result::Result, pin::Pin}; +use crate::{ + config::{JoinFuture, TaskExecutor, TaskType}, + Error, +}; use exit_future::Signal; -use log::{debug, error}; use futures::{ - Future, FutureExt, StreamExt, - future::{select, Either, BoxFuture, join_all, try_join_all, pending}, + future::{join_all, pending, select, try_join_all, BoxFuture, Either}, sink::SinkExt, + Future, FutureExt, StreamExt, }; +use log::{debug, error}; use prometheus_endpoint::{ - exponential_buckets, register, - PrometheusError, - CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 + exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, + Registry, U64, }; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{panic, pin::Pin, result::Result}; use tracing_futures::Instrument; -use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; mod prometheus_future; #[cfg(test)] @@ -62,7 +64,11 @@ impl SpawnTaskHandle { } /// Spawns the blocking task with the given name. See also `spawn`. - pub fn spawn_blocking(&self, name: &'static str, task: impl Future + Send + 'static) { + pub fn spawn_blocking( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { self.spawn_inner(name, task, TaskType::Blocking) } @@ -75,7 +81,7 @@ impl SpawnTaskHandle { ) { if self.task_notifier.is_closed() { debug!("Attempt to spawn a new task has been prevented: {}", name); - return; + return } let on_exit = self.on_exit.clone(); @@ -95,7 +101,8 @@ impl SpawnTaskHandle { let task = { let poll_duration = metrics.poll_duration.with_label_values(&[name]); let poll_start = metrics.poll_start.with_label_values(&[name]); - let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); + let inner = + prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw // away the `Future` after it has panicked. panic::AssertUnwindSafe(inner).catch_unwind() @@ -106,16 +113,15 @@ impl SpawnTaskHandle { Either::Right((Err(payload), _)) => { metrics.tasks_ended.with_label_values(&[name, "panic"]).inc(); panic::resume_unwind(payload) - } + }, Either::Right((Ok(()), _)) => { metrics.tasks_ended.with_label_values(&[name, "finished"]).inc(); - } + }, Either::Left(((), _)) => { // The `on_exit` has triggered. metrics.tasks_ended.with_label_values(&[name, "interrupted"]).inc(); - } + }, } - } else { futures::pin_mut!(task); let _ = select(on_exit, task).await; @@ -162,10 +168,7 @@ impl SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, spawn_task_handle: SpawnTaskHandle, ) -> SpawnEssentialTaskHandle { - SpawnEssentialTaskHandle { - essential_failed_tx, - inner: spawn_task_handle, - } + SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle } } /// Spawns the given task with the given name. @@ -193,12 +196,10 @@ impl SpawnEssentialTaskHandle { task_type: TaskType, ) { let essential_failed = self.essential_failed_tx.clone(); - let essential_task = std::panic::AssertUnwindSafe(task) - .catch_unwind() - .map(move |_| { - log::error!("Essential task `{}` failed. Shutting down service.", name); - let _ = essential_failed.close_channel(); - }); + let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| { + log::error!("Essential task `{}` failed. 
Shutting down service.", name); + let _ = essential_failed.close_channel(); + }); let _ = self.inner.spawn_inner(name, essential_task, task_type); } @@ -260,10 +261,8 @@ impl TaskManager { // NOTE: for_each_concurrent will await on all the JoinHandle futures at the same time. It // is possible to limit this but it's actually better for the memory foot print to await // them all to not accumulate anything on that stream. - let completion_future = executor.spawn( - Box::pin(background_tasks.for_each_concurrent(None, |x| x)), - TaskType::Async, - ); + let completion_future = executor + .spawn(Box::pin(background_tasks.for_each_concurrent(None, |x| x)), TaskType::Async); Ok(Self { on_exit, @@ -323,16 +322,21 @@ impl TaskManager { /// /// This function will not wait until the end of the remaining task. You must call and await /// `clean_shutdown()` after this. - pub fn future<'a>(&'a mut self) -> Pin> + Send + 'a>> { + pub fn future<'a>( + &'a mut self, + ) -> Pin> + Send + 'a>> { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); let mut t3 = try_join_all( - self.children.iter_mut().map(|x| x.future()) + self.children + .iter_mut() + .map(|x| x.future()) // Never end this future if there is no error because if there is no children, // it must not stop - .chain(std::iter::once(pending().boxed())) - ).fuse(); + .chain(std::iter::once(pending().boxed())), + ) + .fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), diff --git a/substrate/client/service/src/task_manager/prometheus_future.rs b/substrate/client/service/src/task_manager/prometheus_future.rs index 6d2a52354d6ca2548a7aa823fda2e71b2dbffd17..43a76a0f596c269a6a366760e195ae6b1bb1ef1d 100644 --- a/substrate/client/service/src/task_manager/prometheus_future.rs +++ b/substrate/client/service/src/task_manager/prometheus_future.rs @@ -20,20 +20,20 @@ use futures::prelude::*; use prometheus_endpoint::{Counter, Histogram, U64}; -use std::{fmt, pin::Pin, task::{Context, Poll}}; +use std::{ + fmt, + pin::Pin, + task::{Context, Poll}, +}; /// Wraps around a `Future`. Report the polling duration to the `Histogram` and when the polling /// starts to the `Counter`. pub fn with_poll_durations( poll_duration: Histogram, poll_start: Counter, - inner: T + inner: T, ) -> PrometheusFuture { - PrometheusFuture { - inner, - poll_duration, - poll_start, - } + PrometheusFuture { inner, poll_duration, poll_start } } /// Wraps around `Future` and adds diagnostics to it. diff --git a/substrate/client/service/src/task_manager/tests.rs b/substrate/client/service/src/task_manager/tests.rs index 09768a19339f2968dbac0446b6f385212e36cae5..d8789e556e1e9fcddb0c2db1303ef8bc8c49ba49 100644 --- a/substrate/client/service/src/task_manager/tests.rs +++ b/substrate/client/service/src/task_manager/tests.rs @@ -16,8 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
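The essential-task wrapper above turns any termination of an essential task, panic or normal return alike, into a closed failure channel, which `TaskManager::future` then reports as a fatal error. A hedged, self-contained sketch of that mechanism using plain `futures` channels (names are illustrative, not the real handle types):

use futures::{channel::mpsc, future::FutureExt, StreamExt};

fn main() {
    let (essential_failed_tx, mut essential_failed_rx) = mpsc::unbounded::<()>();

    // Both a panic and a normal return reach the `map`, closing the channel,
    // mirroring the wrapper in the diff above.
    let essential_task = std::panic::AssertUnwindSafe(async {
        panic!("task failed");
    })
    .catch_unwind()
    .map(move |_| essential_failed_tx.close_channel());

    futures::executor::block_on(async {
        essential_task.await;
        // A closed, empty channel yields `None`; the service treats that as
        // "an essential task died" and shuts down.
        assert!(essential_failed_rx.next().await.is_none());
        println!("Essential task ended. Shutting down service.");
    });
}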
-use crate::config::TaskExecutor; -use crate::task_manager::TaskManager; +use crate::{config::TaskExecutor, task_manager::TaskManager}; use futures::{future::FutureExt, pin_mut, select}; use parking_lot::Mutex; use std::{any::Any, sync::Arc, time::Duration}; @@ -205,7 +204,9 @@ fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 2); spawn_essential_handle.spawn("task3", async { panic!("task failed") }); - runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 2); runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); @@ -265,7 +266,9 @@ fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); assert_eq!(drop_tester, 4); spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") }); - runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + runtime + .block_on(task_manager.future()) + .expect_err("future()'s Result must be Err"); assert_eq!(drop_tester, 4); runtime.block_on(task_manager.clean_shutdown()); assert_eq!(drop_tester, 0); diff --git a/substrate/client/service/test/src/client/db.rs b/substrate/client/service/test/src/client/db.rs index a86e8f2de467c35e71f7f26a0998a6e76e3fe830..5278c9a13a4d7dcfcebba6344a34d461aa12cdb4 100644 --- a/substrate/client/service/test/src/client/db.rs +++ b/substrate/client/service/test/src/client/db.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage}; +use sp_core::offchain::{storage::InMemOffchainStorage, OffchainStorage}; use std::sync::Arc; type TestBackend = sc_client_api::in_mem::Backend; @@ -32,12 +32,13 @@ fn test_leaves_with_complex_block_tree() { fn test_blockchain_query_by_number_gets_canonical() { let backend = Arc::new(TestBackend::new()); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( + backend, + ); } #[test] fn in_memory_offchain_storage() { - let mut storage = InMemOffchainStorage::default(); assert_eq!(storage.get(b"A", b"B"), None); assert_eq!(storage.get(b"B", b"A"), None); diff --git a/substrate/client/service/test/src/client/light.rs b/substrate/client/service/test/src/client/light.rs index 4d620139fa49e43407267c0e764cc3368daf3072..8d1411214d3461ec48722bd7fa2d0e82adbd492d 100644 --- a/substrate/client/service/test/src/client/light.rs +++ b/substrate/client/service/test/src/client/light.rs @@ -16,53 +16,52 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use super::prepare_client_with_key_changes; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::Mutex; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + backend::NewBlockState, + blockchain::Info, + cht, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, + AuxStore, Backend as ClientBackend, BlockBackend, BlockImportOperation, CallExecutor, + ChangesProof, ExecutionStrategy, FetchChecker, ProofProvider, ProvideChtRoots, + RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + RemoteReadChildRequest, RemoteReadRequest, Storage, StorageProof, StorageProvider, +}; +use sc_executor::{NativeExecutor, NativeVersion, RuntimeVersion, WasmExecutionMethod}; use sc_light::{ - call_executor::{ - GenesisCallExecutor, - check_execution_proof, - }, - fetcher::LightDataChecker, - blockchain::{BlockchainCache, Blockchain}, backend::{Backend, GenesisOrUnavailableState}, + blockchain::{Blockchain, BlockchainCache}, + call_executor::{check_execution_proof, GenesisCallExecutor}, + fetcher::LightDataChecker, +}; +use sp_api::{ProofRecorder, StorageTransactionCache}; +use sp_blockchain::{ + well_known_cache_keys, BlockStatus, CachedHeaderMetadata, Error as ClientError, HeaderBackend, + Result as ClientResult, }; -use std::sync::Arc; +use sp_consensus::BlockOrigin; +use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256}; +use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, traits::{BlakeTwo256, Block as _, HashFor, Header as HeaderT, NumberFor}, Digest, Justifications, }; -use std::collections::HashMap; -use parking_lot::Mutex; +use sp_state_machine::{ExecutionManager, OverlayedChanges}; +use std::{cell::RefCell, collections::HashMap, panic::UnwindSafe, sync::Arc}; use substrate_test_runtime_client::{ - runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, -}; -use sp_api::{StorageTransactionCache, ProofRecorder}; -use sp_consensus::BlockOrigin; -use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; -use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; -use sc_client_api::{ - blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, - in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ProvideChtRoots, - AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, - RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, - RemoteChangesRequest, FetchChecker, RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, -}; -use sp_externalities::Extensions; -use sc_block_builder::BlockBuilderProvider; -use sp_blockchain::{ - BlockStatus, Result as ClientResult, Error as ClientError, CachedHeaderMetadata, - HeaderBackend, well_known_cache_keys -}; -use std::panic::UnwindSafe; -use std::cell::RefCell; -use sp_state_machine::{OverlayedChanges, ExecutionManager}; -use parity_scale_codec::{Decode, Encode}; -use super::prepare_client_with_key_changes; -use substrate_test_runtime_client::{ - AccountKeyring, runtime::{self, Extrinsic}, + runtime::{self, Block, Extrinsic, Hash, Header}, + AccountKeyring, ClientBlockImportExt, TestClient, }; -use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; +use sp_core::{ + blake2_256, + storage::{well_known_keys, ChildInfo, StorageKey}, + ChangesTrieConfiguration, +}; use sp_state_machine::Backend as _; pub type DummyBlockchain = Blockchain; @@ -115,7 +114,8 @@ impl 
sp_blockchain::HeaderMetadata for DummyStorage { type Error = ClientError; fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) + self.header(BlockId::hash(hash))? + .map(|header| CachedHeaderMetadata::from(&header)) .ok_or(ClientError::UnknownBlock("header not found".to_owned())) } fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} @@ -127,9 +127,13 @@ impl AuxStore for DummyStorage { 'a, 'b: 'a, 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, _delete: D) -> ClientResult<()> { + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + _delete: D, + ) -> ClientResult<()> { for (k, v) in insert.into_iter() { self.aux_store.lock().insert(k.to_vec(), v.to_vec()); } @@ -182,9 +186,10 @@ impl ProvideChtRoots for DummyStorage { cht::block_to_cht_number(cht_size, block) .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) .cloned() - .ok_or_else(|| ClientError::Backend( - format!("Test error: CHT for block #{} not found", block) - ).into()) + .ok_or_else(|| { + ClientError::Backend(format!("Test error: CHT for block #{} not found", block)) + .into() + }) .map(Some) } } @@ -210,7 +215,7 @@ impl CallExecutor for DummyCallExecutor { fn contextual_call< EM: Fn( Result, Self::Error>, - Result, Self::Error> + Result, Self::Error>, ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, NC: FnOnce() -> Result + UnwindSafe, @@ -220,17 +225,22 @@ impl CallExecutor for DummyCallExecutor { _method: &str, _call_data: &[u8], _changes: &RefCell, - _storage_transaction_cache: Option<&RefCell< - StorageTransactionCache< - Block, - >::State, - > - >>, + _storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache< + Block, + >::State, + >, + >, + >, _execution_manager: ExecutionManager, _native_call: Option, _proof_recorder: &Option>, _extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { + ) -> ClientResult> + where + ExecutionManager: Clone, + { unreachable!() } @@ -243,7 +253,7 @@ impl CallExecutor for DummyCallExecutor { _trie_state: &sp_state_machine::TrieBackend>, _overlay: &mut OverlayedChanges, _method: &str, - _call_data: &[u8] + _call_data: &[u8], ) -> Result<(Vec, StorageProof), ClientError> { unreachable!() } @@ -260,11 +270,11 @@ fn local_executor() -> NativeExecutor = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); + let backend: Backend<_, BlakeTwo256> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, None, NewBlockState::Final).unwrap(); op.set_genesis_state(Default::default(), true).unwrap(); @@ -278,9 +288,8 @@ fn local_state_is_created_when_genesis_state_is_available() { #[test] fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); + let backend: Backend<_, BlakeTwo256> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); match backend.state_at(BlockId::Number(0)).unwrap() { GenesisOrUnavailableState::Unavailable => (), @@ -305,11 +314,8 @@ fn execution_proof_is_generated_and_checked() { let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - 
&[] - ).unwrap(); + let (remote_result, remote_execution_proof) = + remote_client.execution_proof(&remote_block_id, method, &[]).unwrap(); // check remote execution proof locally let local_result = check_execution_proof::<_, _, BlakeTwo256>( @@ -323,7 +329,8 @@ fn execution_proof_is_generated_and_checked() { retry_count: None, }, remote_execution_proof, - ).unwrap(); + ) + .unwrap(); (remote_result, local_result) } @@ -333,17 +340,20 @@ fn execution_proof_is_generated_and_checked() { let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); // 'fetch' execution proof from remote node - let (_, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - "Core_initialize_block", - &Header::new( - at, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ).encode(), - ).unwrap(); + let (_, remote_execution_proof) = remote_client + .execution_proof( + &remote_block_id, + "Core_initialize_block", + &Header::new( + at, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + .encode(), + ) + .unwrap(); // check remote execution proof locally let execution_result = check_execution_proof::<_, _, BlakeTwo256>( @@ -359,7 +369,8 @@ fn execution_proof_is_generated_and_checked() { Default::default(), remote_header.hash(), remote_header.digest().clone(), // this makes next header wrong - ).encode(), + ) + .encode(), retry_count: None, }, remote_execution_proof, @@ -379,7 +390,8 @@ fn execution_proof_is_generated_and_checked() { BlockOrigin::Own, remote_client.new_block(digest).unwrap().build().unwrap().block, Justifications::from((*b"TEST", Default::default())), - )).unwrap(); + )) + .unwrap(); } // check method that doesn't requires environment @@ -401,22 +413,26 @@ fn execution_proof_is_generated_and_checked() { fn code_is_executed_at_genesis_only() { let backend = Arc::new(InMemBackend::::new()); let def = H256::default(); - let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + let header0 = + substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); let hash0 = header0.hash(); - let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); + let header1 = + substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); let hash1 = header1.hash(); - backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); - backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); + backend + .blockchain() + .insert(hash0, header0, None, None, NewBlockState::Final) + .unwrap(); + backend + .blockchain() + .insert(hash1, header1, None, None, NewBlockState::Final) + .unwrap(); let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); assert_eq!( - genesis_executor.call( - &BlockId::Number(0), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ).unwrap(), + genesis_executor + .call(&BlockId::Number(0), "test_method", &[], ExecutionStrategy::NativeElseWasm, None,) + .unwrap(), vec![42], ); @@ -434,7 +450,6 @@ fn code_is_executed_at_genesis_only() { } } - type TestChecker = LightDataChecker< NativeExecutor, BlakeTwo256, @@ -448,27 +463,28 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut 
remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0 + .into(); // 'fetch' read proof from remote node - let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) + let heap_pages = remote_client + .storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) .unwrap() - .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); - let remote_read_proof = remote_client.read_proof( - &remote_block_id, - &mut std::iter::once(well_known_keys::HEAP_PAGES), - ).unwrap(); + .and_then(|v| Decode::decode(&mut &v.0[..]).ok()) + .unwrap(); + let remote_read_proof = remote_client + .read_proof(&remote_block_id, &mut std::iter::once(well_known_keys::HEAP_PAGES)) + .unwrap(); // check remote read proof locally let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - NewBlockState::Final, - ).unwrap(); + local_storage + .insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final) + .unwrap(); let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), @@ -478,45 +494,39 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - use substrate_test_runtime_client::DefaultTestClientBuilderExt; - use substrate_test_runtime_client::TestClientBuilderExt; + use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; let child_info = ChildInfo::new_default(b"child1"); let child_info = &child_info; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() - .add_extra_child_storage( - child_info, - b"key1".to_vec(), - b"value1".to_vec(), - ).build(); + .add_extra_child_storage(child_info, b"key1".to_vec(), b"value1".to_vec()) + .build(); let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0 + .into(); // 'fetch' child read proof from remote node - let child_value = remote_client.child_storage( - &remote_block_id, - child_info, - &StorageKey(b"key1".to_vec()), - ).unwrap().unwrap().0; + let child_value = remote_client + .child_storage(&remote_block_id, child_info, &StorageKey(b"key1".to_vec())) + .unwrap() + .unwrap() + .0; assert_eq!(b"value1"[..], child_value[..]); - let remote_read_proof = remote_client.read_child_proof( - &remote_block_id, - child_info, - &mut std::iter::once("key1".as_bytes()), - ).unwrap(); + let remote_read_proof = remote_client + .read_child_proof(&remote_block_id, child_info, &mut std::iter::once("key1".as_bytes())) + .unwrap(); // check locally let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - 
None, - NewBlockState::Final, - ).unwrap(); + local_storage + .insert(remote_block_hash, remote_block_header.clone(), None, None, NewBlockState::Final) + .unwrap(); let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), @@ -533,18 +543,21 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; futures::executor::block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); local_headers_hashes.push( - remote_client.block_hash(i + 1) - .map_err(|_| ClientError::Backend("TestError".into())) + remote_client + .block_hash(i + 1) + .map_err(|_| ClientError::Backend("TestError".into())), ); } // 'fetch' header proof from remote node let remote_block_id = BlockId::Number(1); - let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); + let (remote_block_header, remote_header_proof) = + remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); // check remote read proof locally let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); + let local_cht_root = + cht::compute_root::(4, 0, local_headers_hashes).unwrap(); if insert_cht { local_storage.insert_cht_root(1, local_cht_root); } @@ -557,7 +570,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade } fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { - use sp_trie::{TrieConfiguration, trie_types::Layout}; + use sp_trie::{trie_types::Layout, TrieConfiguration}; let iter = extrinsics.iter().map(Encode::encode); let extrinsics_root = Layout::::ordered_trie_root(iter); @@ -567,66 +580,106 @@ #[test] fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::<Header>
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = + prepare_for_read_proof_check(); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_proof( + &RemoteReadRequest::<Header>
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .remove(well_known_keys::HEAP_PAGES) + .unwrap() + .unwrap()[0], + heap_pages as u8 + ); } #[test] fn storage_child_read_proof_is_generated_and_checked() { let child_info = ChildInfo::new_default(&b"child1"[..]); - let ( - local_checker, - remote_block_header, - remote_read_proof, - result, - ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::<Header>
{ - block: remote_block_header.hash(), - header: remote_block_header, - storage_key: child_info.prefixed_storage_key(), - keys: vec![b"key1".to_vec()], - retry_count: None, - }, - remote_read_proof - ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); + let (local_checker, remote_block_header, remote_read_proof, result) = + prepare_for_read_child_proof_check(); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_child_proof( + &RemoteReadChildRequest::<Header>
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: child_info.prefixed_storage_key(), + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .remove(b"key1".as_ref()) + .unwrap() + .unwrap(), + result + ); } #[test] fn header_proof_is_generated_and_checked() { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::<Header>
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .unwrap(), + remote_block_header + ); } #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + let (local_checker, _, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: Default::default(), - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); + assert!((&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::<Header>
{ + cht_root: Default::default(), + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); } #[test] fn check_header_proof_fails_if_invalid_header_provided() { - let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::<Header>
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); + assert!((&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::<Header>
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); } #[test] @@ -647,9 +700,9 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { // 'fetch' changes proof from remote node let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key - ).unwrap(); + let remote_proof = remote_client + .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key) + .unwrap(); // check proof on local client let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); @@ -668,20 +721,23 @@ fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { storage_key: None, retry_count: None, }; - let local_result = local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).unwrap(); + let local_result = local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + ) + .unwrap(); // ..and ensure that result is the same as on remote node if local_result != expected_result { panic!( "Failed test {}: local = {:?}, expected = {:?}", - index, - local_result, - expected_result, + index, local_result, expected_result, ); } } @@ -702,12 +758,17 @@ fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) + .unwrap(); // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let local_cht_root = cht::compute_root::( + 4, + 0, + remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), + ) + .unwrap(); let mut local_storage = DummyStorage::new(); local_storage.changes_tries_cht_roots.insert(0, local_cht_root); let local_checker = TestChecker::new( @@ -732,12 +793,18 @@ fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { key: dave.0, retry_count: None, }; - let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, 4).unwrap(); + let local_result = local_checker + .check_changes_proof_with_cht_size( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + 4, + ) + .unwrap(); assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); } @@ -760,8 +827,9 @@ fn check_changes_proof_fails_if_proof_is_wrong() { // 'fetch' changes proof from remote node let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); + let remote_proof = remote_client + 
.key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key) + .unwrap(); let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); let config = ChangesTrieConfiguration::new(4, 2); @@ -781,34 +849,54 @@ fn check_changes_proof_fails_if_proof_is_wrong() { }; // check proof on local client using max from the future - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block + 1, - proof: remote_proof.proof.clone(), - roots: remote_proof.roots.clone(), - roots_proof: remote_proof.roots_proof.clone(), - }).is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block + 1, + proof: remote_proof.proof.clone(), + roots: remote_proof.roots.clone(), + roots_proof: remote_proof.roots_proof.clone(), + } + ) + .is_err()); // check proof on local client using broken proof - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + } + ) + .is_err()); // extra roots proofs are provided - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(begin - 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(end + 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(begin - 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + } + ) + .is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(end + 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + } + ) + .is_err()); } #[test] @@ -817,7 +905,11 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); let local_cht_root = cht::compute_root::( - 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + 4, + 0, + remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), + ) + .unwrap(); let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); let dave = StorageKey(dave); @@ -828,9 +920,9 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, 
None, &dave, 4 - ).unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) + .unwrap(); // fails when changes trie CHT is missing from the local db let local_checker = TestChecker::new( @@ -838,8 +930,9 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { local_executor(), Box::new(TaskExecutor::new()), ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); + assert!(local_checker + .check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()) + .is_err()); // fails when proof is broken let mut local_storage = DummyStorage::new(); @@ -849,17 +942,15 @@ fn check_changes_tries_proof_fails_if_proof_is_wrong() { local_executor(), Box::new(TaskExecutor::new()), ); - let result = local_checker.check_changes_tries_proof( - 4, &remote_proof.roots, StorageProof::empty() - ); + let result = + local_checker.check_changes_tries_proof(4, &remote_proof.roots, StorageProof::empty()); assert!(result.is_err()); } #[test] fn check_body_proof_faulty() { - let header = header_with_computed_extrinsics_root( - vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] - ); + let header = + header_with_computed_extrinsics_root(vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])]); let block = Block::new(header.clone(), Vec::new()); let local_checker = TestChecker::new( @@ -868,10 +959,7 @@ fn check_body_proof_faulty() { Box::new(TaskExecutor::new()), ); - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; + let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; assert!( local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), @@ -892,10 +980,7 @@ fn check_body_proof_of_same_data_should_succeed() { Box::new(TaskExecutor::new()), ); - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; + let body_request = RemoteBodyRequest { header: header.clone(), retry_count: None }; assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); } diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index bdd693f57b2d05861b98e3f73a843a278f5e3857..9e89dc932b7fa0ab092e73053e8232af1e88908e 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -16,48 +16,50 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
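The `check_body_proof_*` tests above rest on a single invariant: a block body matches its header exactly when the ordered trie root of the SCALE-encoded extrinsics equals the header's `extrinsics_root`, which is what `header_with_computed_extrinsics_root` precomputes. A sketch of that check, reusing the `sp_trie` items this diff already imports (the helper name is ours, and crate versions are assumed to match this diff):

use parity_scale_codec::Encode;
use sp_runtime::traits::BlakeTwo256;
use sp_trie::{trie_types::Layout, TrieConfiguration};

/// Recompute the extrinsics root from a body and compare it to the header's.
fn body_matches_header(extrinsics: &[Vec<u8>], expected_root: sp_core::H256) -> bool {
    let encoded = extrinsics.iter().map(Encode::encode);
    Layout::<BlakeTwo256>::ordered_trie_root(encoded) == expected_root
}

`check_body_proof_faulty` pairs an empty body with a header computed from a non-empty extrinsic list, so the recomputed root differs and the check errs; the `same_data` variant round-trips the identical extrinsics and succeeds.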
-use parity_scale_codec::{Encode, Decode, Joiner}; -use sc_executor::native_executor_instance; -use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; -use substrate_test_runtime_client::{ - prelude::*, - runtime::{ - self, genesismap::{GenesisConfig, insert_genesis_block}, - Hash, Transfer, Block, BlockNumber, Header, Digest, RuntimeApi, - }, - AccountKeyring, Sr25519Keyring, TestClientBuilder, ClientBlockImportExt, - BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, -}; -use sc_client_api::{ - StorageProvider, BlockBackend, in_mem, BlockchainEvents, -}; +use futures::executor::block_on; +use hex_literal::hex; +use parity_scale_codec::{Decode, Encode, Joiner}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{in_mem, BlockBackend, BlockchainEvents, StorageProvider}; use sc_client_db::{ - Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode, KeepBlocks, TransactionStorageMode + Backend, DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, PruningMode, TransactionStorageMode, }; -use sc_block_builder::BlockBuilderProvider; -use sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; +use sc_executor::native_executor_instance; +use sc_service::client::{self, new_in_mem, Client, LocalCallExecutor}; +use sp_api::ProvideRuntimeApi; +use sp_consensus::{ + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, BlockStatus, + Error as ConsensusError, ForkChoiceStrategy, ImportResult, SelectChain, +}; +use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; use sp_runtime::{ - ConsensusEngineId, + generic::BlockId, traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, + ConsensusEngineId, DigestItem, Justifications, +}; +use sp_state_machine::{ + backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, +}; +use sp_storage::{ChildInfo, StorageKey}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, }; use substrate_test_runtime::TestAPI; -use sp_state_machine::backend::Backend as _; -use sp_api::ProvideRuntimeApi; -use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use sp_consensus::{ - BlockOrigin, SelectChain, BlockImport, Error as ConsensusError, BlockCheckParams, ImportResult, - BlockStatus, BlockImportParams, ForkChoiceStrategy, +use substrate_test_runtime_client::{ + prelude::*, + runtime::{ + self, + genesismap::{insert_genesis_block, GenesisConfig}, + Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, + }, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; -use sp_storage::{StorageKey, ChildInfo}; -use sp_trie::{TrieConfiguration, trie_types::Layout}; -use sp_runtime::{generic::BlockId, DigestItem, Justifications}; -use hex_literal::hex; -use futures::executor::block_on; -mod light; mod db; +mod light; const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; @@ -68,11 +70,7 @@ native_executor_instance!( ); fn executor() -> sc_executor::NativeExecutor { - sc_executor::NativeExecutor::new( - sc_executor::WasmExecutionMethod::Interpreted, - None, - 8, - ) + sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) } pub fn prepare_client_with_key_changes() -> ( @@ -80,14 +78,17 @@ pub fn prepare_client_with_key_changes() -> ( 
substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, Block, - RuntimeApi + RuntimeApi, >, Vec, Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, ) { // prepare block structure let blocks_transfers = vec![ - vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], + vec![ + (AccountKeyring::Alice, AccountKeyring::Dave), + (AccountKeyring::Bob, AccountKeyring::Dave), + ], vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], vec![], vec![(AccountKeyring::Alice, AccountKeyring::Dave)], @@ -101,18 +102,22 @@ pub fn prepare_client_with_key_changes() -> ( for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { let mut builder = remote_client.new_block(Default::default()).unwrap(); for (from, to) in block_transfers { - builder.push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), - }).unwrap(); + builder + .push_transfer(Transfer { + from: from.into(), + to: to.into(), + amount: 1, + nonce: *nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(), + }) + .unwrap(); } let block = builder.build().unwrap().block; block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header.digest().log(DigestItem::as_changes_trie_root) + let trie_root = header + .digest() + .log(DigestItem::as_changes_trie_root) .map(|root| H256::from_slice(root.as_ref())) .unwrap(); local_roots.push(trie_root); @@ -121,10 +126,12 @@ pub fn prepare_client_with_key_changes() -> ( // prepare test cases let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); + let charlie = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); + let ferdie = + blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); let test_cases = vec![ (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), (1, 3, alice.clone(), vec![(1, 0)]), @@ -181,9 +188,9 @@ fn construct_block( Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); for tx in transactions.iter() { StateMachine::new( @@ -196,9 +203,9 @@ fn construct_block( Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); } let ret_data = StateMachine::new( @@ -211,9 +218,9 @@ fn construct_block( Default::default(), &runtime_code, task_executor.clone() as Box<_>, - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); header = Header::decode(&mut &ret_data[..]).unwrap(); (vec![].and(&Block { header, extrinsics: transactions }), hash) @@ -243,7 +250,8 @@ fn construct_genesis_should_work_with_native() { 1000, None, 
Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -263,9 +271,9 @@ fn construct_genesis_should_work_with_native() { Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); } #[test] @@ -277,7 +285,8 @@ fn construct_genesis_should_work_with_wasm() { 1000, None, Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -297,9 +306,9 @@ fn construct_genesis_should_work_with_wasm() { Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::AlwaysWasm, - ).unwrap(); + ) + .execute(ExecutionStrategy::AlwaysWasm) + .unwrap(); } #[test] @@ -311,7 +320,8 @@ fn construct_genesis_with_bad_transaction_should_panic() { 68, None, Default::default(), - ).genesis_map(); + ) + .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); let backend = InMemoryBackend::from(storage); @@ -331,9 +341,8 @@ fn construct_genesis_with_bad_transaction_should_panic() { Default::default(), &runtime_code, TaskExecutor::new(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ); + ) + .execute(ExecutionStrategy::NativeElseWasm); assert!(r.is_err()); } @@ -342,17 +351,23 @@ fn client_initializes_from_genesis_ok() { let client = substrate_test_runtime_client::new(); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap(), 1000 ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ) + .unwrap(), 0 ); } @@ -374,12 +389,14 @@ fn block_builder_works_with_transactions() { let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -390,17 +407,23 @@ fn block_builder_works_with_transactions() { client.state_at(&BlockId::Number(0)).unwrap().pairs() ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap(), 958 ); assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into(), - ).unwrap(), + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ) + .unwrap(), 42 ); } @@ -411,21 +434,23 @@ fn block_builder_does_not_include_invalid() { let mut builder = client.new_block(Default::default()).unwrap(); - 
builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); - assert!( - builder.push_transfer(Transfer { + assert!(builder + .push_transfer(Transfer { from: AccountKeyring::Eve.into(), to: AccountKeyring::Alice.into(), amount: 42, nonce: 0, - }).is_err() - ); + }) + .is_err()); let block = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); @@ -462,12 +487,7 @@ fn best_containing_with_hash_not_found() { let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - let uninserted_block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; assert_eq!( None, @@ -498,8 +518,8 @@ fn uncles_with_multiple_forks() { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // B2 -> C3 + // A1 -> D2 let mut client = substrate_test_runtime_client::new(); // G -> A1 @@ -507,98 +527,104 @@ fn uncles_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - 
Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); @@ -631,21 +657,11 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -675,8 +691,8 @@ fn best_containing_on_longest_chain_with_multiple_forks() { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // B2 -> C3 + // A1 -> D2 let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 @@ -684,67 +700,73 @@ fn best_containing_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; 
block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // B2 -> C3 @@ -764,18 +786,18 @@ fn best_containing_on_longest_chain_with_multiple_forks() { block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); @@ -957,10 +979,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { .unwrap() .unwrap() ); - assert_eq!( - None, - 
block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap()); assert_eq!( b4.hash(), block_on(longest_chain_select.finality_target(b2.hash(), Some(4))) @@ -1017,14 +1036,8 @@ fn best_containing_on_longest_chain_with_multiple_forks() { .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap()); assert_eq!( b3.hash(), block_on(longest_chain_select.finality_target(b2.hash(), Some(3))) @@ -1037,10 +1050,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap()); assert_eq!( c3.hash(), block_on(longest_chain_select.finality_target(c3.hash(), Some(3))) @@ -1073,36 +1083,18 @@ fn best_containing_on_longest_chain_with_multiple_forks() { .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap()); assert_eq!( b2.hash(), block_on(longest_chain_select.finality_target(b2.hash(), Some(2))) .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap()); assert_eq!( d2.hash(), block_on(longest_chain_select.finality_target(d2.hash(), Some(2))) @@ -1123,83 +1115,32 @@ fn best_containing_on_longest_chain_with_multiple_forks() { .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a5.hash(), Some(1))).unwrap() - ); - - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - 
block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap() - ); - - // search only blocks with number <= 0 + assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(1))).unwrap()); + + assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap()); + + // search only blocks with number <= 0 assert_eq!( genesis_hash, block_on(longest_chain_select.finality_target(genesis_hash, Some(0))) .unwrap() .unwrap() ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap() - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap() - ); + assert_eq!(None, block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap()); + assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap()); assert_eq!( None, block_on(longest_chain_select.finality_target(c3.hash().clone(), Some(0))).unwrap(), @@ -1218,21 +1159,11 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 - let a1 = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = 
client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -1251,18 +1182,12 @@ fn key_changes_works() { for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { let end = client.block_hash(end).unwrap().unwrap(); - let actual_result = client.key_changes( - begin, - BlockId::Hash(end), - None, - &StorageKey(key), - ).unwrap(); + let actual_result = + client.key_changes(begin, BlockId::Hash(end), None, &StorageKey(key)).unwrap(); if actual_result != expected_result { panic!( "Failed test {}: actual = {:?}, expected = {:?}", - index, - actual_result, - expected_result, + index, actual_result, expected_result, ); } } @@ -1277,41 +1202,31 @@ fn import_with_justification() { block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone())).unwrap(); - assert_eq!( - client.chain_info().finalized_hash, - a3.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, a3.hash(),); - assert_eq!( - client.justifications(&BlockId::Hash(a3.hash())).unwrap(), - Some(justification), - ); + assert_eq!(client.justifications(&BlockId::Hash(a3.hash())).unwrap(), Some(justification),); - assert_eq!( - client.justifications(&BlockId::Hash(a1.hash())).unwrap(), - None, - ); + assert_eq!(client.justifications(&BlockId::Hash(a1.hash())).unwrap(), None,); - assert_eq!( - client.justifications(&BlockId::Hash(a2.hash())).unwrap(), - None, - ); + assert_eq!(client.justifications(&BlockId::Hash(a2.hash())).unwrap(), None,); } #[test] @@ -1321,54 +1236,44 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: 
AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); // create but don't import B1 just yet let b1 = b1.build().unwrap().block; // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); + assert_eq!(client.chain_info().best_hash, a2.hash(),); // importing B1 as finalized should trigger a re-org and set it as new best let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); block_on(client.import_justified(BlockOrigin::Own, b1.clone(), justification)).unwrap(); - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().best_hash, b1.hash(),); - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, b1.hash(),); } #[test] @@ -1378,84 +1283,70 @@ fn finalizing_diverged_block_should_trigger_reorg() { // G -> A1 -> A2 // \ // -> B1 -> B2 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); + assert_eq!(client.chain_info().best_hash, a2.hash(),); // we finalize block B1 which is on a different branch from current best // which should trigger a re-org. 
ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); // B1 should now be the latest finalized - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().finalized_hash, b1.hash(),); // and B1 should be the new best block (`finalize_block` as no way of // knowing about B2) - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); + assert_eq!(client.chain_info().best_hash, b1.hash(),); // `SelectChain` should report B2 as best block though - assert_eq!( - block_on(select_chain.best_chain()).unwrap().hash(), - b2.hash(), - ); + assert_eq!(block_on(select_chain.best_chain()).unwrap().hash(), b2.hash(),); // after we build B3 on top of B2 and import it // it should be the new best block, - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); - assert_eq!( - client.chain_info().best_hash, - b3.hash(), - ); + assert_eq!(client.chain_info().best_hash, b3.hash(),); } #[test] @@ -1473,55 +1364,53 @@ fn state_reverted_on_reorg() { sp_tracing::try_init_simple(); let mut client = substrate_test_runtime_client::new(); - let current_balance = |client: &substrate_test_runtime_client::TestClient| - client.runtime_api().balance_of( - &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into(), - ).unwrap(); + let current_balance = |client: &substrate_test_runtime_client::TestClient| { + client + .runtime_api() + .balance_of( + &BlockId::number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap() + }; // G -> A1 -> A2 // \ // -> B1 - let mut a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); a1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 10, nonce: 0, - }).unwrap(); + }) + .unwrap(); let a1 = a1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 50, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; // Reorg to B1 block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950, current_balance(&client)); - let mut a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); a2.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 10, nonce: 1, - }).unwrap(); + }) + .unwrap(); let a2 = a2.build().unwrap().block; // Re-org to A2 block_on(client.import_as_best(BlockOrigin::Own, a2)).unwrap(); @@ -1535,20 +1424,20 @@ fn doesnt_import_blocks_that_revert_finality() { // we need to run with archive pruning to avoid pruning non-canonical // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: 
None, - state_pruning: PruningMode::ArchiveAll, - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 1024, + let backend = Arc::new( + Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, - }, - u64::MAX, - ).unwrap()); + u64::MAX, + ) + .unwrap(), + ); let mut client = TestClientBuilder::with_backend(backend).build(); @@ -1558,18 +1447,20 @@ fn doesnt_import_blocks_that_revert_finality() { // \ // -> B1 -> B2 -> B3 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1580,18 +1471,27 @@ fn doesnt_import_blocks_that_revert_finality() { to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); - let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesn't include it @@ -1599,15 +1499,13 @@ fn doesnt_import_blocks_that_revert_finality() { let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::RuntimeApiError( - sp_api::ApiError::Application(Box::new(sp_blockchain::Error::NotInFinalizedChain)) - ).to_string() + sp_blockchain::Error::RuntimeApiError(sp_api::ApiError::Application(Box::new( + sp_blockchain::Error::NotInFinalizedChain, + ))) + .to_string(), ); - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); + assert_eq!(import_err.to_string(), expected_err.to_string(),); // adding a C1 block which is lower than the last finalized should also // fail (with a cheaper check that doesn't require checking ancestry). 
@@ -1619,18 +1517,15 @@ fn doesnt_import_blocks_that_revert_finality() { to: AccountKeyring::Ferdie.into(), amount: 2, nonce: 0, - }).unwrap(); + }) + .unwrap(); let c1 = c1.build().unwrap().block; let import_err = block_on(client.import(BlockOrigin::Own, c1)).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); + let expected_err = + ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); + assert_eq!(import_err.to_string(), expected_err.to_string(),); } #[test] @@ -1644,15 +1539,16 @@ fn respects_block_rules() { TestClientBuilder::new().build() } else { TestClientBuilder::new() - .set_block_rules( - Some(fork_rules.clone()), - Some(known_bad.clone()), - ) + .set_block_rules(Some(fork_rules.clone()), Some(known_bad.clone())) .build() }; - let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; + let block_ok = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; let params = BlockCheckParams { hash: block_ok.hash().clone(), @@ -1664,8 +1560,8 @@ fn respects_block_rules() { assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 - let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap(); + let mut block_not_ok = + client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1686,8 +1582,8 @@ fn respects_block_rules() { block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork - let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); + let mut block_ok = + client.new_block_at(&BlockId::Number(1), Default::default(), false).unwrap(); block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); let block_ok = block_ok.build().unwrap().block; @@ -1704,8 +1600,8 @@ fn respects_block_rules() { assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // And now try bad fork - let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); + let mut block_not_ok = + client.new_block_at(&BlockId::Number(1), Default::default(), false).unwrap(); block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); let block_not_ok = block_not_ok.build().unwrap().block; @@ -1739,28 +1635,29 @@ fn returns_status_for_pruned_blocks() { // set to prune after 1 block // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - state_pruning: PruningMode::keep_blocks(1), - keep_blocks: KeepBlocks::All, - transaction_storage: TransactionStorageMode::BlockBody, - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 1024, + let backend = Arc::new( + Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + state_pruning: PruningMode::keep_blocks(1), + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + source: DatabaseSettingsSrc::RocksDb { path: tmp.path().into(), cache_size: 1024 }, }, - }, 
- u64::MAX, - ).unwrap()); + u64::MAX, + ) + .unwrap(), + ); let mut client = TestClientBuilder::with_backend(backend).build(); - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1770,7 +1667,8 @@ fn returns_status_for_pruned_blocks() { to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; let check_block_a1 = BlockCheckParams { @@ -1801,11 +1699,12 @@ fn returns_status_for_pruned_blocks() { BlockStatus::InChainWithState, ); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { @@ -1833,11 +1732,12 @@ fn returns_status_for_pruned_blocks() { BlockStatus::InChainWithState, ); - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { @@ -1904,7 +1804,8 @@ fn imports_blocks_with_changes_tries_config_change() { .changes_trie_config(Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2, - })).build(); + })) + .build(); // =================================================================== // blocks 1,2,3,4,5,6,7,8,9,10 are empty @@ -1923,70 +1824,114 @@ fn imports_blocks_with_changes_tries_config_change() { // block 31 is L1 digest that covers this change // =================================================================== (1..11).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (11..12).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (12..23).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (23..24).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - 
block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 5, - digest_levels: 1, - })).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (24..26).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (26..27).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (27..28).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (28..29).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 3, - digest_levels: 1, - })).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (29..30).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (30..31).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); let block = block.build().unwrap().block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (31..32).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - 
.unwrap().build().unwrap().block; + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); @@ -2010,44 +1955,53 @@ fn storage_keys_iter_prefix_and_start_key_works() { let prefix = StorageKey(hex!("3a").to_vec()); let child_prefix = StorageKey(b"sec".to_vec()); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .map(|x| x.0) .collect(); - assert_eq!(res, [ - child_root.clone(), - hex!("3a636f6465").to_vec(), - hex!("3a686561707061676573").to_vec(), - ]); + assert_eq!( + res, + [child_root.clone(), hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec(),] + ); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a686561707061676573").to_vec())), + ) .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, Vec::>::new()); - let res: Vec<_> = client.child_storage_keys_iter( - &BlockId::Number(0), - child_info.clone(), - Some(&child_prefix), - None, - ).unwrap() + let res: Vec<_> = client + .child_storage_keys_iter(&BlockId::Number(0), child_info.clone(), Some(&child_prefix), None) + .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, [b"second".to_vec()]); - let res: Vec<_> = client.child_storage_keys_iter( - &BlockId::Number(0), - child_info, - None, - Some(&StorageKey(b"second".to_vec())), - ).unwrap() + let res: Vec<_> = client + .child_storage_keys_iter( + &BlockId::Number(0), + child_info, + None, + Some(&StorageKey(b"second".to_vec())), + ) + .unwrap() .map(|x| x.0) .collect(); assert_eq!(res, [b"third".to_vec()]); @@ -2059,30 +2013,52 @@ fn storage_keys_iter_works() { let prefix = StorageKey(hex!("").to_vec()); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() .take(2) .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); + assert_eq!( + res, + [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()] + ); - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) .unwrap() .take(3) .map(|x| x.0) .collect(); - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), 
Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ] + ); + + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey( + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + )), + ) .unwrap() .take(1) .map(|x| x.0) .collect(); - assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); + assert_eq!( + res, + [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()] + ); } #[test] @@ -2092,26 +2068,29 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = - new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi, - >( - substrate_test_runtime_client::new_native_executor(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - None, - None, - None, - Box::new(TaskExecutor::new()), - Default::default(), - ) - .unwrap(); + let mut client = new_in_mem::< + _, + substrate_test_runtime_client::runtime::Block, + _, + substrate_test_runtime_client::runtime::RuntimeApi, + >( + substrate_test_runtime_client::new_native_executor(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + None, + None, + Box::new(TaskExecutor::new()), + Default::default(), + ) + .unwrap(); type TestClient = Client< in_mem::Backend, - LocalCallExecutor, sc_executor::NativeExecutor>, + LocalCallExecutor< + Block, + in_mem::Backend, + sc_executor::NativeExecutor, + >, substrate_test_runtime_client::runtime::Block, substrate_test_runtime_client::runtime::RuntimeApi, >; @@ -2123,12 +2102,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // for some reason I can't seem to use `ClientBlockImportExt` let bake_and_import_block = |client: &mut TestClient, origin| { - let block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); @@ -2168,44 +2142,43 @@ fn cleans_up_closed_notification_sinks_on_block_import() { fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifications() { let mut client = TestClientBuilder::new().build(); - let mut notification_stream = futures::executor::block_on_stream( - client.import_notification_stream() - ); + let mut notification_stream = + futures::executor::block_on_stream(client.import_notification_stream()); - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - 
).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); // needed to make sure B1 gets a different hash from A1 b1.push_transfer(Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Ferdie.into(), amount: 1, nonce: 0, - }).unwrap(); + }) + .unwrap(); let b1 = b1.build().unwrap().block; block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; // Should trigger a notification because we reorg block_on(client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone())).unwrap(); diff --git a/substrate/client/service/test/src/lib.rs b/substrate/client/service/test/src/lib.rs index 44228d1575cc2da28f8fd26fd08a66d966e32481..9433ed0bde06f0879f6f4c5158bf05ba5e6709dd 100644 --- a/substrate/client/service/test/src/lib.rs +++ b/substrate/client/service/test/src/lib.rs @@ -18,38 +18,27 @@ //! Service integration test utils. -use std::iter; -use std::sync::Arc; -use std::net::Ipv4Addr; -use std::pin::Pin; -use std::time::Duration; -use log::{info, debug}; -use futures01::{Future, Stream, Poll}; use futures::{FutureExt as _, TryFutureExt as _}; -use tempfile::TempDir; -use tokio::{runtime::Runtime, prelude::FutureExt}; -use tokio::timer::Interval; +use futures01::{Future, Poll, Stream}; +use log::{debug, info}; +use parking_lot::Mutex; +use sc_client_api::{Backend, CallExecutor}; +use sc_network::{ + config::{NetworkConfiguration, TransportConfig}, + multiaddr, Multiaddr, +}; use sc_service::{ - TaskManager, - SpawnTaskHandle, - GenericChainSpec, - ChainSpecExtension, - Configuration, - KeepBlocks, TransactionStorageMode, - config::{BasePath, DatabaseConfig, KeystoreConfig}, - RuntimeGenesis, - Role, - Error, - TaskExecutor, client::Client, + config::{BasePath, DatabaseConfig, KeystoreConfig}, + ChainSpecExtension, Configuration, Error, GenericChainSpec, KeepBlocks, Role, RuntimeGenesis, + SpawnTaskHandle, TaskExecutor, TaskManager, TransactionStorageMode, }; +use sc_transaction_pool_api::TransactionPool; use sp_blockchain::HeaderBackend; -use sc_network::{multiaddr, Multiaddr}; -use sc_network::config::{NetworkConfiguration, TransportConfig}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sc_transaction_pool_api::TransactionPool; -use sc_client_api::{Backend, CallExecutor}; -use parking_lot::Mutex; +use std::{iter, net::Ipv4Addr, pin::Pin, sync::Arc, time::Duration}; +use tempfile::TempDir; +use tokio::{prelude::FutureExt, runtime::Runtime, timer::Interval}; #[cfg(test)] mod client; @@ -67,7 +56,9 @@ struct TestNet { nodes: usize, } -pub trait TestNetNode: Clone + Future + Send + 'static { +pub trait TestNetNode: + Clone + Future + Send + 'static +{ type Block: BlockT; type Backend: Backend; type Executor: CallExecutor + Send + Sync; @@ -76,7 +67,9 @@ pub trait TestNetNode: Clone + Future + Se fn client(&self) -> Arc>; fn transaction_pool(&self) -> Arc; - fn network(&self) -> Arc::Hash>>; + fn network( + &self, 
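// A std-only sketch of the rule the reorg test above pins down: imports with
// origin `NetworkInitialSync` normally emit no notification, but an import
// that changes the best chain (a reorg, as with `import_as_best` above) must
// notify anyway. Types and names here are illustrative, not the client API.
enum Origin {
	Own,
	NetworkInitialSync,
}

struct Chain {
	best: String, // hash of the current best block
}

impl Chain {
	/// Returns `true` when an import notification must be emitted.
	fn import(&mut self, origin: Origin, parent: &str, hash: &str, is_new_best: bool) -> bool {
		// A new best block whose parent is not the old best reorganizes the chain.
		let reorg = is_new_best && parent != self.best;
		if is_new_best {
			self.best = hash.to_string();
		}
		match origin {
			Origin::Own => true,
			Origin::NetworkInitialSync => reorg,
		}
	}
}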
+ ) -> Arc::Hash>>; fn spawn_handle(&self) -> SpawnTaskHandle; } @@ -88,23 +81,21 @@ pub struct TestNetComponents { } impl -TestNetComponents { + TestNetComponents +{ pub fn new( task_manager: TaskManager, client: Arc>, network: Arc::Hash>>, transaction_pool: Arc, ) -> Self { - Self { - client, transaction_pool, network, - task_manager: Arc::new(Mutex::new(task_manager)), - } + Self { client, transaction_pool, network, task_manager: Arc::new(Mutex::new(task_manager)) } } } - -impl Clone for -TestNetComponents { +impl Clone + for TestNetComponents +{ fn clone(&self) -> Self { Self { task_manager: self.task_manager.clone(), @@ -115,8 +106,8 @@ TestNetComponents { } } -impl Future for - TestNetComponents +impl Future + for TestNetComponents { type Item = (); type Error = sc_service::Error; @@ -126,14 +117,14 @@ impl Future for } } -impl TestNetNode for -TestNetComponents - where - TBl: BlockT, - TBackend: sc_client_api::Backend + Send + Sync + 'static, - TExec: CallExecutor + Send + Sync + 'static, - TRtApi: Send + Sync + 'static, - TExPool: TransactionPool + Send + Sync + 'static, +impl TestNetNode + for TestNetComponents +where + TBl: BlockT, + TBackend: sc_client_api::Backend + Send + Sync + 'static, + TExec: CallExecutor + Send + Sync + 'static, + TRtApi: Send + Sync + 'static, + TExPool: TransactionPool + Send + Sync + 'static, { type Block = TBl; type Backend = TBackend; @@ -147,7 +138,9 @@ TestNetComponents fn transaction_pool(&self) -> Arc { self.transaction_pool.clone() } - fn network(&self) -> Arc::Hash>> { + fn network( + &self, + ) -> Arc::Hash>> { self.network.clone() } fn spawn_handle(&self) -> SpawnTaskHandle { @@ -156,33 +149,32 @@ TestNetComponents } impl TestNet -where F: Clone + Send + 'static, L: Clone + Send +'static, U: Clone + Send + 'static +where + F: Clone + Send + 'static, + L: Clone + Send + 'static, + U: Clone + Send + 'static, { - pub fn run_until_all_full( - &mut self, - full_predicate: FP, - light_predicate: LP, - ) - where - FP: Send + Fn(usize, &F) -> bool + 'static, - LP: Send + Fn(usize, &L) -> bool + 'static, + pub fn run_until_all_full(&mut self, full_predicate: FP, light_predicate: LP) + where + FP: Send + Fn(usize, &F) -> bool + 'static, + LP: Send + Fn(usize, &L) -> bool + 'static, { let full_nodes = self.full_nodes.clone(); let light_nodes = self.light_nodes.clone(); let interval = Interval::new_interval(Duration::from_millis(100)) .map_err(|_| ()) .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| - full_predicate(*id, service) - ); + let full_ready = full_nodes + .iter() + .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)); if !full_ready { - return Ok(()); + return Ok(()) } - let light_ready = light_nodes.iter().all(|&(ref id, ref service, _)| - light_predicate(*id, service) - ); + let light_ready = light_nodes + .iter() + .all(|&(ref id, ref service, _)| light_predicate(*id, service)); if !light_ready { Ok(()) @@ -200,7 +192,10 @@ where F: Clone + Send + 'static, L: Clone + Send +'static, U: Clone + Send + 'st } } -fn node_config ( +fn node_config< + G: RuntimeGenesis + 'static, + E: ChainSpecExtension + Clone + 'static + Send + Sync, +>( index: usize, spec: &GenericChainSpec, role: Role, @@ -208,8 +203,7 @@ fn node_config, base_port: u16, root: &TempDir, -) -> Configuration -{ +) -> Configuration { let root = root.path().join(format!("node-{}", index)); let mut network_config = NetworkConfiguration::new( @@ -224,7 +218,7 @@ fn node_config TestNet where +impl TestNet +where F: TestNetNode, 
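// Why `TestNetComponents` implements `Clone` by hand instead of deriving it:
// a derived impl would add `Clone` bounds on every type parameter, while the
// fields are all `Arc`s (including the `Arc<Mutex<TaskManager>>`), which clone
// cheaply regardless. A minimal sketch with illustrative names:
use std::sync::{Arc, Mutex};

struct Components<C, P> {
	task_manager: Arc<Mutex<String>>, // stand-in for the shared TaskManager
	client: Arc<C>,
	pool: Arc<P>,
}

impl<C, P> Clone for Components<C, P> {
	fn clone(&self) -> Self {
		Self {
			task_manager: self.task_manager.clone(),
			client: self.client.clone(),
			pool: self.pool.clone(),
		}
	}
}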
L: TestNetNode, E: ChainSpecExtension + Clone + 'static + Send + Sync, @@ -295,11 +284,8 @@ impl TestNet where spec: GenericChainSpec, full: impl Iterator Result<(F, U), Error>>, light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error> - )>, - base_port: u16 + authorities: impl Iterator Result<(F, U), Error>)>, + base_port: u16, ) -> TestNet { sp_tracing::try_init_simple(); fdlimit::raise_fd_limit(); @@ -322,7 +308,7 @@ impl TestNet where temp: &TempDir, full: impl Iterator Result<(F, U), Error>>, light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error>)> + authorities: impl Iterator Result<(F, U), Error>)>, ) { let executor = self.runtime.executor(); let task_executor: TaskExecutor = { @@ -330,7 +316,8 @@ impl TestNet where (move |fut: Pin + Send>>, _| { executor.spawn(fut.unit_error().compat()); async {} - }).into() + }) + .into() }; for (key, authority) in authorities { @@ -344,10 +331,12 @@ impl TestNet where &temp, ); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let (service, user_data) = authority(node_config).expect("Error creating test node service"); + let (service, user_data) = + authority(node_config).expect("Error creating test node service"); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -366,7 +355,8 @@ impl TestNet where let (service, user_data) = full(node_config).expect("Error creating test node service"); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -385,7 +375,8 @@ impl TestNet where let service = light(node_config).expect("Error creating test node service"); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + let addr = addr + .with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); self.light_nodes.push((self.nodes, service, addr)); self.nodes += 1; } @@ -393,7 +384,10 @@ impl TestNet where } fn tempdir_with_prefix(prefix: &str) -> TempDir { - tempfile::Builder::new().prefix(prefix).tempdir().expect("Error creating test dir") + tempfile::Builder::new() + .prefix(prefix) + .tempdir() + .expect("Error creating test dir") } pub fn connectivity( @@ -420,8 +414,8 @@ pub fn connectivity( let mut network = TestNet::new( &temp, spec.clone(), - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. 
(0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -430,11 +424,15 @@ pub fn connectivity( info!("Checking star topology"); let first_address = network.full_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } @@ -464,8 +462,8 @@ pub fn connectivity( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), @@ -477,14 +475,18 @@ pub fn connectivity( for i in 0..max_nodes { if i != 0 { if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { - service.network().add_reserved_peer(address.to_string()) + service + .network() + .add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } } if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.network().add_reserved_peer(address.to_string()) + service + .network() + .add_reserved_peer(address.to_string()) .expect("Error adding reserved peer"); address = node_id.clone(); } @@ -512,7 +514,7 @@ pub fn sync( full_builder: Fb, light_builder: Lb, mut make_block_and_import: B, - mut extrinsic_factory: ExF + mut extrinsic_factory: ExF, ) where Fb: Fn(Configuration) -> Result<(F, U), Error>, F: TestNetNode, @@ -532,8 +534,8 @@ pub fn sync( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg)), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), @@ -542,7 +544,7 @@ pub fn sync( info!("Checking block sync"); let first_address = { let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; - for i in 0 .. 
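// The two wirings `connectivity` checks above, sketched over an assumed
// `connect(dialer, target)` primitive standing in for `add_reserved_peer`:
// a star (every node dials node 0) and a chain (each node dials its
// predecessor; the real test interleaves light nodes into the chain).
fn star(n: usize, mut connect: impl FnMut(usize, usize)) {
	for i in 1..n {
		connect(i, 0);
	}
}

fn chain(n: usize, mut connect: impl FnMut(usize, usize)) {
	for i in 1..n {
		connect(i, i - 1);
	}
}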
NUM_BLOCKS { + for i in 0..NUM_BLOCKS { if i % 128 == 0 { info!("Generating #{}", i + 1); } @@ -550,24 +552,29 @@ pub fn sync( make_block_and_import(&first_service, first_user_data); } let info = network.full_nodes[0].1.client().info(); - network.full_nodes[0].1.network().new_best_block_imported(info.best_hash, info.best_number); + network.full_nodes[0] + .1 + .network() + .new_best_block_imported(info.best_hash, info.best_number); network.full_nodes[0].3.clone() }; info!("Running sync"); for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - |_index, service| - service.client().info().best_number == (NUM_BLOCKS as u32).into(), - |_index, service| - service.client().info().best_number == (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number == (NUM_BLOCKS as u32).into(), ); info!("Checking extrinsic propagation"); @@ -577,9 +584,12 @@ pub fn sync( let extrinsic = extrinsic_factory(&first_service, first_user_data); let source = sc_transaction_pool_api::TransactionSource::External; - futures::executor::block_on( - first_service.transaction_pool().submit_one(&best_block, source, extrinsic) - ).expect("failed to submit extrinsic"); + futures::executor::block_on(first_service.transaction_pool().submit_one( + &best_block, + source, + extrinsic, + )) + .expect("failed to submit extrinsic"); network.run_until_all_full( |_index, service| service.transaction_pool().ready().count() == 1, @@ -591,7 +601,7 @@ pub fn consensus( spec: GenericChainSpec, full_builder: Fb, light_builder: Lb, - authorities: impl IntoIterator + authorities: impl IntoIterator, ) where Fb: Fn(Configuration) -> Result, F: TestNetNode, @@ -607,54 +617,64 @@ pub fn consensus( let mut network = TestNet::new( &temp, spec, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), - authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), + authorities + .into_iter() + .map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), 30600, ); info!("Checking consensus"); let first_address = network.authority_nodes[0].3.clone(); for (_, service, _, _) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _, _) in network.authority_nodes.iter().skip(1) { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } 
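// `run_until_all_full` (used just below) drives the whole network until every
// node satisfies a predicate. A std-only sketch of that polling loop, with a
// deadline added for illustration (the real helper polls on a 100ms interval
// inside the tokio runtime):
use std::time::{Duration, Instant};

fn run_until_all<T>(
	nodes: &[T],
	pred: impl Fn(usize, &T) -> bool,
	timeout: Duration,
) -> bool {
	let deadline = Instant::now() + timeout;
	while Instant::now() < deadline {
		if nodes.iter().enumerate().all(|(i, n)| pred(i, n)) {
			return true
		}
		std::thread::sleep(Duration::from_millis(100));
	}
	false
}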
network.run_until_all_full( - |_index, service| - service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), - |_index, service| - service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), + |_index, service| { + service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into() + }, + |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), ); info!("Adding more peers"); network.insert_nodes( &temp, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise // the type of the closure cannot be inferred. (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), ); for (_, service, _, _) in network.full_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } for (_, service, _) in network.light_nodes.iter() { - service.network().add_reserved_peer(first_address.to_string()) + service + .network() + .add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } network.run_until_all_full( - |_index, service| - service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), - |_index, service| - service.client().info().best_number >= (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), + |_index, service| service.client().info().best_number >= (NUM_BLOCKS as u32).into(), ); } diff --git a/substrate/client/state-db/src/lib.rs b/substrate/client/state-db/src/lib.rs index 1340442061abaa978e5d1d917c7e1a42181c116e..cdff39895d229448844ada1d4cef25b869679b9b 100644 --- a/substrate/client/state-db/src/lib.rs +++ b/substrate/client/state-db/src/lib.rs @@ -44,15 +44,17 @@ mod pruning; #[cfg(test)] mod test; -use std::fmt; -use parking_lot::RwLock; use codec::Codec; -use std::collections::{HashMap, hash_map::Entry}; +use log::trace; use noncanonical::NonCanonicalOverlay; +use parity_util_mem::{malloc_size, MallocSizeOf}; +use parking_lot::RwLock; use pruning::RefWindow; -use log::trace; -use parity_util_mem::{MallocSizeOf, malloc_size}; -use sc_client_api::{StateDbMemoryInfo, MemorySize}; +use sc_client_api::{MemorySize, StateDbMemoryInfo}; +use std::{ + collections::{hash_map::Entry, HashMap}, + fmt, +}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -63,8 +65,35 @@ const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; pub type DBValue = Vec; /// Basic set of requirements for the Block hash and node key types. -pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static {} -impl Hash for T {} +pub trait Hash: + Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static +{ +} +impl< + T: Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static, + > Hash for T +{ +} /// Backend database trait. Read-only. pub trait MetaDb { @@ -168,17 +197,14 @@ pub enum PruningMode { impl PruningMode { /// Create a mode that keeps given number of blocks. 
pub fn keep_blocks(n: u32) -> PruningMode { - PruningMode::Constrained(Constraints { - max_blocks: Some(n), - max_mem: None, - }) + PruningMode::Constrained(Constraints { max_blocks: Some(n), max_mem: None }) } /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? pub fn is_archive(&self) -> bool { match *self { PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true, - PruningMode::Constrained(_) => false + PruningMode::Constrained(_) => false, } } @@ -224,20 +250,12 @@ impl StateDbSync = NonCanonicalOverlay::new(db)?; let pruning: Option> = match mode { - PruningMode::Constrained(Constraints { - max_mem: Some(_), - .. - }) => unimplemented!(), + PruningMode::Constrained(Constraints { max_mem: Some(_), .. }) => unimplemented!(), PruningMode::Constrained(_) => Some(RefWindow::new(db, ref_counting)?), PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, }; - Ok(StateDbSync { - mode, - non_canonical, - pruning, - pinned: Default::default(), - }) + Ok(StateDbSync { mode, non_canonical, pruning, pinned: Default::default() }) } fn check_meta(mode: &PruningMode, db: &D) -> Result<(), Error> { @@ -270,10 +288,7 @@ impl StateDbSync { changeset.deleted.clear(); // write changes immediately - Ok(CommitSet { - data: changeset, - meta, - }) + Ok(CommitSet { data: changeset, meta }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); @@ -281,7 +296,7 @@ impl StateDbSync StateDbSync { + Ok(()) => if self.mode == PruningMode::ArchiveCanonical { commit.data.deleted.clear(); - } - } + }, Err(e) => return Err(e), }; if let Some(ref mut pruning) = self.pruning { @@ -319,31 +333,30 @@ impl StateDbSync c).unwrap_or(true) { !self.non_canonical.have_block(hash) } else { - self.pruning - .as_ref() - .map_or( - false, - |pruning| number < pruning.pending() || !pruning.have_block(hash), - ) + self.pruning.as_ref().map_or(false, |pruning| { + number < pruning.pending() || !pruning.have_block(hash) + }) } - } + }, } } fn prune(&mut self, commit: &mut CommitSet) { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { + if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = + (&mut self.pruning, &self.mode) + { loop { if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break; + break } if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break; + break } let pinned = &self.pinned; if pruning.next_hash().map_or(false, |h| pinned.contains_key(&h)) { - break; + break } pruning.prune_one(commit); } @@ -355,23 +368,17 @@ impl StateDbSync Option> { match self.mode { - PruningMode::ArchiveAll => { - Some(CommitSet::default()) - }, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.revert_one() - }, + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.revert_one(), } } fn remove(&mut self, hash: &BlockHash) -> Option> { match self.mode { - PruningMode::ArchiveAll => { - Some(CommitSet::default()) - }, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.remove(hash) - }, + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => + self.non_canonical.remove(hash), } } @@ -392,7 +399,7 @@ impl StateDbSync StateDbSync(&self, key: &Q, db: &D) -> 
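// The prune loop above keeps a FIFO window of canonicalized blocks and trims
// it from the front while the window exceeds `max_blocks`, stopping early when
// the next candidate is pinned. A minimal std-only sketch of that policy:
use std::collections::{HashSet, VecDeque};

struct Pruner {
	window: VecDeque<u64>, // block numbers, oldest at the front
	pinned: HashSet<u64>,
	max_blocks: usize,
}

impl Pruner {
	/// Returns the block numbers pruned in this pass.
	fn prune(&mut self) -> Vec<u64> {
		let mut pruned = Vec::new();
		while self.window.len() > self.max_blocks {
			match self.window.front() {
				Some(n) if self.pinned.contains(n) => break, // pinned: keep it
				_ => {},
			}
			if let Some(n) = self.window.pop_front() {
				pruned.push(n);
			}
		}
		pruned
	}
}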
Result, Error> + pub fn get( + &self, + key: &Q, + db: &D, + ) -> Result, Error> where Q: AsRef, Key: std::borrow::Borrow, Q: std::hash::Hash + Eq, { if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)); + return Ok(Some(value)) } db.get(key.as_ref()).map_err(|e| Error::Db(e)) } @@ -469,9 +480,7 @@ impl StateDb Result, Error> { - Ok(StateDb { - db: RwLock::new(StateDbSync::new(mode, ref_counting, db)?) - }) + Ok(StateDb { db: RwLock::new(StateDbSync::new(mode, ref_counting, db)?) }) } /// Add a new non-canonical block. @@ -504,11 +513,15 @@ impl StateDb(&self, key: &Q, db: &D) -> Result, Error> - where - Q: AsRef, - Key: std::borrow::Borrow, - Q: std::hash::Hash + Eq, + pub fn get( + &self, + key: &Q, + db: &D, + ) -> Result, Error> + where + Q: AsRef, + Key: std::borrow::Borrow, + Q: std::hash::Hash + Eq, { self.db.read().get(key, db) } @@ -554,10 +567,12 @@ impl StateDb (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -679,13 +694,13 @@ mod tests { let state_db = StateDb::new(PruningMode::ArchiveAll, false, &db).unwrap(); db.commit( &state_db - .insert_block::( - &H256::from_low_u64_be(0), - 0, - &H256::from_low_u64_be(0), - make_changeset(&[], &[]), - ) - .unwrap(), + .insert_block::( + &H256::from_low_u64_be(0), + 0, + &H256::from_low_u64_be(0), + make_changeset(&[], &[]), + ) + .unwrap(), ); let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); let state_db: Result, _> = StateDb::new(new_mode, false, &db); diff --git a/substrate/client/state-db/src/noncanonical.rs b/substrate/client/state-db/src/noncanonical.rs index de6d1bfcf8bb28314642f7c54e44a63962f77943..eff440d3375c769ed78167942c3057791eaba632 100644 --- a/substrate/client/state-db/src/noncanonical.rs +++ b/substrate/client/state-db/src/noncanonical.rs @@ -22,11 +22,13 @@ //! All pending changes are kept in memory until next call to `apply_pending` or //! `revert_pending` -use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; -use codec::{Encode, Decode}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::trace; +use std::{ + collections::{hash_map::Entry, HashMap, VecDeque}, + fmt, +}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; @@ -40,8 +42,8 @@ pub struct NonCanonicalOverlay { parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: HashMap, //ref counted - //would be deleted but kept around because block is pinned, ref counted. + values: HashMap, // ref counted + // would be deleted but kept around because block is pinned, ref counted. 
pinned: HashMap, pinned_insertions: HashMap, u32)>, } @@ -69,10 +71,7 @@ impl OverlayLevel { } fn new() -> OverlayLevel { - OverlayLevel { - blocks: Vec::new(), - used_indicies: 0, - } + OverlayLevel { blocks: Vec::new(), used_indicies: 0 } } } @@ -98,7 +97,10 @@ struct BlockOverlay { deleted: Vec, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { +fn insert_values( + values: &mut HashMap, + inserted: Vec<(Key, DBValue)>, +) { for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -118,7 +120,7 @@ fn discard_values(values: &mut HashMap, inserted }, Entry::Vacant(_) => { debug_assert!(false, "Trying to discard missing value"); - } + }, } } } @@ -142,10 +144,12 @@ fn discard_descendants( }; let mut pinned_children = 0; if let Some(level) = first { - while let Some(i) = level.blocks.iter().position(|overlay| parents.get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed") - == hash) - { + while let Some(i) = level.blocks.iter().position(|overlay| { + parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") == + hash + }) { let overlay = level.remove(i); let mut num_pinned = discard_descendants( &mut remainder, @@ -153,7 +157,7 @@ fn discard_descendants( parents, pinned, pinned_insertions, - &overlay.hash + &overlay.hash, ); if pinned.contains_key(&overlay.hash) { num_pinned += 1; @@ -175,10 +179,11 @@ fn discard_descendants( impl NonCanonicalOverlay { /// Creates a new instance. Does not expect any metadata to be present in the DB. pub fn new(db: &D) -> Result, Error> { - let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &())) - .map_err(|e| Error::Db(e))?; + let last_canonicalized = + db.get_meta(&to_meta_key(LAST_CANONICAL, &())).map_err(|e| Error::Db(e))?; let last_canonicalized = last_canonicalized - .map(|buffer| <(BlockHash, u64)>::decode(&mut buffer.as_slice())).transpose()?; + .map(|buffer| <(BlockHash, u64)>::decode(&mut buffer.as_slice())) + .transpose()?; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); let mut values = HashMap::new(); @@ -189,16 +194,17 @@ impl NonCanonicalOverlay { block += 1; loop { let mut level = OverlayLevel::new(); - for index in 0 .. MAX_BLOCKS_PER_LEVEL { + for index in 0..MAX_BLOCKS_PER_LEVEL { let journal_key = to_journal_key(block, index); if let Some(record) = db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: record.hash.clone(), journal_index: index, journal_key, - inserted: inserted, + inserted, deleted: record.deleted, }; insert_values(&mut values, record.inserted); @@ -216,7 +222,7 @@ impl NonCanonicalOverlay { } } if level.blocks.is_empty() { - break; + break } levels.push_back(level); block += 1; @@ -231,38 +237,55 @@ impl NonCanonicalOverlay { pending_insertions: Default::default(), pinned: Default::default(), pinned_insertions: Default::default(), - values: values, + values, }) } /// Insert a new block into the overlay. If inserted on the second level or lower, expects the parent to be present in the window. 
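// `insert_values`/`discard_values` above reference-count values so that the
// same key/value inserted by sibling blocks is stored once and dropped only
// when the last block referencing it is discarded. A std-only mirror of that
// logic (u64 keys stand in for the generic `Key`):
use std::collections::{hash_map::Entry, HashMap};

fn insert_values(values: &mut HashMap<u64, (u32, Vec<u8>)>, inserted: Vec<(u64, Vec<u8>)>) {
	for (k, v) in inserted {
		let (counter, _) = values.entry(k).or_insert_with(|| (0, v));
		*counter += 1;
	}
}

fn discard_values(values: &mut HashMap<u64, (u32, Vec<u8>)>, discarded: Vec<u64>) {
	for k in discarded {
		if let Entry::Occupied(mut e) = values.entry(k) {
			e.get_mut().0 -= 1;
			if e.get().0 == 0 {
				e.remove(); // last reference gone: drop the value
			}
		}
	}
}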
- pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { // assume that parent was canonicalized let last_canonicalized = (parent_hash.clone(), number - 1); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); self.last_canonicalized = Some(last_canonicalized); } else if self.last_canonicalized.is_some() { - if number < front_block_number || number >= front_block_number + self.levels.len() as u64 + 1 { + if number < front_block_number || + number >= front_block_number + self.levels.len() as u64 + 1 + { trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", number, front_block_number, front_block_number + self.levels.len() as u64, ); - return Err(Error::InvalidBlockNumber); + return Err(Error::InvalidBlockNumber) } // check for valid parent if inserting on second level or higher if number == front_block_number { - if !self.last_canonicalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(Error::InvalidParent); + if !self + .last_canonicalized + .as_ref() + .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) + { + return Err(Error::InvalidParent) } } else if !self.parents.contains_key(&parent_hash) { - return Err(Error::InvalidParent); + return Err(Error::InvalidParent) } } - let level = if self.levels.is_empty() || number == front_block_number + self.levels.len() as u64 { + let level = if self.levels.is_empty() || + number == front_block_number + self.levels.len() as u64 + { self.levels.push_back(OverlayLevel::new()); self.levels.back_mut().expect("can't be empty after insertion; qed") } else { @@ -271,7 +294,7 @@ impl NonCanonicalOverlay { }; if level.blocks.len() >= MAX_BLOCKS_PER_LEVEL as usize { - return Err(Error::TooManySiblingBlocks); + return Err(Error::TooManySiblingBlocks) } let index = level.available_index(); @@ -282,7 +305,7 @@ impl NonCanonicalOverlay { hash: hash.clone(), journal_index: index, journal_key: journal_key.clone(), - inserted: inserted, + inserted, deleted: changeset.deleted.clone(), }; level.push(overlay); @@ -305,15 +328,24 @@ impl NonCanonicalOverlay { level_index: usize, discarded_journals: &mut Vec>, discarded_blocks: &mut Vec, - hash: &BlockHash + hash: &BlockHash, ) { if let Some(level) = self.levels.get(level_index) { level.blocks.iter().for_each(|overlay| { - let parent = self.parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); + let parent = self + .parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); if parent == *hash { discarded_journals.push(overlay.journal_key.clone()); discarded_blocks.push(overlay.hash.clone()); - self.discard_journals(level_index + 1, discarded_journals, discarded_blocks, &overlay.hash); + self.discard_journals( + level_index + 1, + discarded_journals, + discarded_blocks, + &overlay.hash, + ); } }); } @@ -326,7 +358,8 @@ impl NonCanonicalOverlay { pub fn last_canonicalized_block_number(&self) -> Option { match 
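// The window checks in `insert` above, condensed: a block must fall inside
// the current range of levels, and its parent must already be known (present
// in the window, or the last canonicalized block when inserting on the first
// level). A sketch with illustrative types:
#[derive(Debug, PartialEq)]
enum InsertError {
	InvalidBlockNumber,
	InvalidParent,
}

fn check_insert(
	front: u64,         // number of the first non-canonical block
	levels: u64,        // how many levels the window currently holds
	number: u64,
	parent_known: bool, // parent in the window / last canonicalized block
) -> Result<(), InsertError> {
	if number < front || number >= front + levels + 1 {
		return Err(InsertError::InvalidBlockNumber)
	}
	if !parent_known {
		return Err(InsertError::InvalidParent)
	}
	Ok(())
}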
self.last_canonicalized.as_ref().map(|&(_, n)| n) { Some(n) => Some(n + self.pending_canonicalizations.len() as u64), - None if !self.pending_canonicalizations.is_empty() => Some(self.pending_canonicalizations.len() as u64), + None if !self.pending_canonicalizations.is_empty() => + Some(self.pending_canonicalizations.len() as u64), _ => None, } } @@ -351,8 +384,12 @@ impl NonCanonicalOverlay { commit: &mut CommitSet, ) -> Result<(), Error> { trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = self.levels.get(self.pending_canonicalizations.len()).ok_or_else(|| Error::InvalidBlock)?; - let index = level.blocks + let level = self + .levels + .get(self.pending_canonicalizations.len()) + .ok_or_else(|| Error::InvalidBlock)?; + let index = level + .blocks .iter() .position(|overlay| overlay.hash == *hash) .ok_or_else(|| Error::InvalidBlock)?; @@ -365,7 +402,7 @@ impl NonCanonicalOverlay { self.pending_canonicalizations.len() + 1, &mut discarded_journals, &mut discarded_blocks, - &overlay.hash + &overlay.hash, ); } discarded_journals.push(overlay.journal_key.clone()); @@ -374,13 +411,25 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level.blocks[index]; - commit.data.inserted.extend(overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); + commit.data.inserted.extend(overlay.inserted.iter().map(|k| { + ( + k.clone(), + self.values + .get(k) + .expect("For each key in overlays there's a value in values") + .1 + .clone(), + ) + })); commit.data.deleted.extend(overlay.deleted.clone()); commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); + let canonicalized = + (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); self.pending_canonicalizations.push(hash.clone()); Ok(()) @@ -391,8 +440,10 @@ impl NonCanonicalOverlay { let count = self.pending_canonicalizations.len() as u64; for hash in self.pending_canonicalizations.drain(..) 
{ trace!(target: "state-db", "Post canonicalizing {:?}", hash); - let level = self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); - let index = level.blocks + let level = + self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); + let index = level + .blocks .iter() .position(|overlay| overlay.hash == hash) .expect("Hash validity is checked in `canonicalize`"); @@ -415,7 +466,8 @@ impl NonCanonicalOverlay { pinned_children += 1; } if pinned_children != 0 { - self.pinned_insertions.insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); + self.pinned_insertions + .insert(overlay.hash.clone(), (overlay.inserted, pinned_children)); } else { self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); @@ -423,7 +475,10 @@ impl NonCanonicalOverlay { } } if let Some(hash) = last { - let last_canonicalized = (hash, self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1)); + let last_canonicalized = ( + hash, + self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1), + ); self.last_canonicalized = Some(last_canonicalized); } } @@ -435,15 +490,15 @@ impl NonCanonicalOverlay { Q: std::hash::Hash + Eq, { if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); + return Some(value.clone()) } None } /// Check if the block is in the canonicalization queue. pub fn have_block(&self, hash: &BlockHash) -> bool { - (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) - && !self.pending_canonicalizations.contains(hash) + (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) && + !self.pending_canonicalizations.contains(hash) } /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. @@ -471,13 +526,13 @@ impl NonCanonicalOverlay { // Check that it does not have any children if (level_index != level_count - 1) && self.parents.values().any(|h| h == hash) { log::debug!(target: "state-db", "Trying to remove block {:?} with children", hash); - return None; + return None } let overlay = level.remove(index); commit.meta.deleted.push(overlay.journal_key); self.parents.remove(&overlay.hash); discard_values(&mut self.values, overlay.inserted); - break; + break } if self.levels.back().map_or(false, |l| l.blocks.is_empty()) { self.levels.pop_back(); @@ -494,9 +549,13 @@ impl NonCanonicalOverlay { for hash in self.pending_insertions.drain(..) { self.parents.remove(&hash); // find a level. When iterating insertions backwards the hash is always last in the level. - let level_index = - self.levels.iter().position(|level| - level.blocks.last().expect("Hash is added in `insert` in reverse order").hash == hash) + let level_index = self + .levels + .iter() + .position(|level| { + level.blocks.last().expect("Hash is added in `insert` in reverse order").hash == + hash + }) .expect("Hash is added in insert"); let overlay_index = self.levels[level_index].blocks.len() - 1; @@ -526,7 +585,7 @@ impl NonCanonicalOverlay { if self.pending_insertions.contains(hash) { // Pinning pending state is not implemented. Pending states // won't be pruned for quite some time anyway, so it's not a big deal. 
- return; + return } let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { @@ -576,14 +635,17 @@ impl NonCanonicalOverlay { #[cfg(test)] mod tests { - use std::io; + use super::{to_journal_key, NonCanonicalOverlay}; + use crate::{ + test::{make_changeset, make_db}, + ChangeSet, CommitSet, MetaDb, + }; use sp_core::H256; - use super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet, MetaDb}; - use crate::test::{make_db, make_changeset}; + use std::io; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) == + Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] @@ -611,7 +673,9 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 2, &H256::default(), ChangeSet::default()) + .unwrap(); overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); } @@ -622,7 +686,9 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); } @@ -633,8 +699,12 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 2, &H256::default(), ChangeSet::default()) + .unwrap(); } #[test] @@ -644,7 +714,9 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -655,7 +727,9 @@ mod tests { let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); + let insertion = overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(); assert_eq!(insertion.data.inserted.len(), 0); assert_eq!(insertion.data.deleted.len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); @@ -677,7 +751,11 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); @@ -693,7 +771,11 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), 
make_changeset(&[3, 4], &[2])).unwrap()); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); @@ -768,7 +850,11 @@ mod tests { let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); let changeset = make_changeset(&[], &[]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(), + ); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); let mut commit = CommitSet::default(); @@ -1035,14 +1121,18 @@ mod tests { let h21 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&root, &mut commit).unwrap(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB + overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); overlay.apply_pending(); assert_eq!(overlay.levels.len(), 1); @@ -1056,7 +1146,7 @@ mod tests { assert!(contains(&overlay, 21)); let mut commit = CommitSet::default(); - overlay.canonicalize::(&h21, &mut commit).unwrap(); // h11 should stay in the DB + overlay.canonicalize::(&h21, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); overlay.apply_pending(); assert!(!contains(&overlay, 21)); @@ -1073,19 +1163,23 @@ mod tests { let h21 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&root, &mut commit).unwrap(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB + overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB db.commit(&commit); overlay.apply_pending(); // add another block at top level. 
It should reuse journal index 0 of previously discarded block - let h22 = H256::random(); + let h22 = H256::random(); db.commit(&overlay.insert::(&h22, 12, &h2, make_changeset(&[22], &[])).unwrap()); assert_eq!(overlay.levels[0].blocks[0].journal_index, 1); assert_eq!(overlay.levels[0].blocks[1].journal_index, 0); @@ -1106,7 +1200,11 @@ mod tests { let h21 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); + db.commit( + &overlay + .insert::(&root, 10, &H256::default(), make_changeset(&[], &[])) + .unwrap(), + ); db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); diff --git a/substrate/client/state-db/src/pruning.rs b/substrate/client/state-db/src/pruning.rs index 0c682d8954b13623d4a5af81a8d8bed3cc8bae36..bb0f7f7961446f9aa8a32f3b117370b05d4b4c4d 100644 --- a/substrate/client/state-db/src/pruning.rs +++ b/substrate/client/state-db/src/pruning.rs @@ -24,10 +24,10 @@ //! the death list. //! The changes are journaled in the DB. -use std::collections::{HashMap, HashSet, VecDeque}; -use codec::{Encode, Decode}; -use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; +use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::{trace, warn}; +use std::collections::{HashMap, HashSet, VecDeque}; const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -72,9 +72,11 @@ fn to_journal_key(block: u64) -> Vec { } impl RefWindow { - pub fn new(db: &D, count_insertions: bool) -> Result, Error> { - let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())) - .map_err(|e| Error::Db(e))?; + pub fn new( + db: &D, + count_insertions: bool, + ) -> Result, Error> { + let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())).map_err(|e| Error::Db(e))?; let pending_number: u64 = match last_pruned { Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, None => 0, @@ -83,7 +85,7 @@ impl RefWindow { let mut pruning = RefWindow { death_rows: Default::default(), death_index: Default::default(), - pending_number: pending_number, + pending_number, pending_canonicalizations: 0, pending_prunings: 0, count_insertions, @@ -94,9 +96,15 @@ impl RefWindow { let journal_key = to_journal_key(block); match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); }, None => break, } @@ -105,7 +113,13 @@ impl RefWindow { Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { if self.count_insertions { // remove all re-inserted keys from death rows for k in inserted { @@ -120,13 +134,11 @@ impl RefWindow { self.death_index.insert(k.clone(), imported_block); } } - self.death_rows.push_back( - DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key: journal_key, - } - ); + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key, + }); } pub fn window_size(&self) -> u64 { @@ -172,23 +184,27 @@ impl RefWindow { Default::default() }; let deleted = ::std::mem::take(&mut commit.data.deleted); - let journal_record = JournalRecord { - hash: hash.clone(), - inserted, - deleted, - }; + let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted }; let block = self.pending_number + self.death_rows.len() as u64; let journal_key = to_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); + self.import( + &journal_record.hash, + journal_key, + journal_record.inserted.into_iter(), + journal_record.deleted, + ); self.pending_canonicalizations += 1; } /// Apply all pending changes pub fn apply_pending(&mut self) { self.pending_canonicalizations = 0; - for _ in 0 .. self.pending_prunings { - let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); + for _ in 0..self.pending_prunings { + let pruned = self + .death_rows + .pop_front() + .expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); if self.count_insertions { for k in pruned.deleted.iter() { @@ -219,9 +235,11 @@ impl RefWindow { #[cfg(test)] mod tests { use super::RefWindow; + use crate::{ + test::{make_commit, make_db, TestDb}, + CommitSet, + }; use sp_core::H256; - use crate::CommitSet; - use crate::test::{make_db, make_commit, TestDb}; fn check_journal(pruning: &RefWindow, db: &TestDb) { let restored: RefWindow = RefWindow::new(db, pruning.count_insertions).unwrap(); @@ -419,5 +437,4 @@ mod tests { assert!(db.data_eq(&make_db(&[1, 3]))); assert!(pruning.death_index.is_empty()); } - } diff --git a/substrate/client/state-db/src/test.rs b/substrate/client/state-db/src/test.rs index e1bb6d01c37e458548be82930279a363057b2b61..ad5ce8e874cc7748e7f951ff08bff21fd3323ac2 100644 --- a/substrate/client/state-db/src/test.rs +++ b/substrate/client/state-db/src/test.rs @@ -18,9 +18,9 @@ //! 
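// `RefWindow::new` above rebuilds the pruning window by replaying journal
// entries from the metadata column: starting at the block after `last_pruned`,
// it reads consecutive `pruning_journal` keys until one is missing. A std-only
// sketch of that replay (the real keys are SCALE-encoded; this encoding is
// illustrative):
use std::collections::HashMap;

fn to_journal_key(block: u64) -> Vec<u8> {
	let mut key = b"pruning_journal".to_vec();
	key.extend_from_slice(&block.to_le_bytes()); // illustrative encoding
	key
}

fn replay(meta: &HashMap<Vec<u8>, Vec<u8>>, last_pruned: Option<u64>) -> Vec<Vec<u8>> {
	let mut block = last_pruned.map_or(0, |n| n + 1);
	let mut records = Vec::new();
	while let Some(record) = meta.get(&to_journal_key(block)) {
		records.push(record.clone());
		block += 1;
	}
	records
}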
Test utils -use std::collections::HashMap; +use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use std::collections::HashMap; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { @@ -67,30 +67,22 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { ChangeSet { inserted: inserted .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) + .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), } } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { - CommitSet { - data: make_changeset(inserted, deleted), - meta: ChangeSet::default(), - } + CommitSet { data: make_changeset(inserted, deleted), meta: ChangeSet::default() } } pub fn make_db(inserted: &[u64]) -> TestDb { TestDb { data: inserted .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) + .map(|v| (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec())) .collect(), meta: Default::default(), } } - diff --git a/substrate/client/sync-state-rpc/src/lib.rs b/substrate/client/sync-state-rpc/src/lib.rs index 4cb4955995540dead89b75c3539069375e55e695..e786a10cd4406d601e2f87325a71a2719226b604 100644 --- a/substrate/client/sync-state-rpc/src/lib.rs +++ b/substrate/client/sync-state-rpc/src/lib.rs @@ -21,16 +21,19 @@ #![deny(unused_crate_dependencies)] -use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_blockchain::HeaderBackend; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; use std::sync::Arc; -use sp_runtime::generic::BlockId; use jsonrpc_derive::rpc; type SharedAuthoritySet = sc_finality_grandpa::SharedAuthoritySet<::Hash, NumberFor>; -type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges; +type SharedEpochChanges = + sc_consensus_epochs::SharedEpochChanges; #[derive(Debug, thiserror::Error)] #[allow(missing_docs)] @@ -51,11 +54,7 @@ impl From> for jsonrpc_core::Error { Error::JsonRpc(s) => s, _ => error.to_string(), }; - jsonrpc_core::Error { - message, - code: jsonrpc_core::ErrorCode::ServerError(1), - data: None, - } + jsonrpc_core::Error { message, code: jsonrpc_core::ErrorCode::ServerError(1), data: None } } } @@ -64,8 +63,7 @@ impl From> for jsonrpc_core::Error { pub trait SyncStateRpcApi { /// Returns the json-serialized chainspec running the node, with a sync state. #[rpc(name = "sync_state_genSyncSpec", returns = "jsonrpc_core::Value")] - fn system_gen_sync_spec(&self, raw: bool) - -> jsonrpc_core::Result; + fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result; } /// The handler for sync state RPC calls. @@ -78,9 +76,9 @@ pub struct SyncStateRpcHandler { } impl SyncStateRpcHandler - where - TBl: BlockT, - TCl: HeaderBackend + sc_client_api::AuxStore + 'static, +where + TBl: BlockT, + TCl: HeaderBackend + sc_client_api::AuxStore + 'static, { /// Create a new handler. 
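// The `From` impl above flattens every sync-state error into a single JSON-RPC
// server error, keeping a custom message only for the `JsonRpc` variant. A
// dependency-free sketch of that shape (`RpcError` stands in for
// `jsonrpc_core::Error`):
#[derive(Debug)]
enum Error {
	JsonRpc(String),
	LoadingBlockWeightFailed(u64),
}

struct RpcError {
	code: i64,
	message: String,
}

impl From<Error> for RpcError {
	fn from(error: Error) -> Self {
		let message = match error {
			Error::JsonRpc(s) => s,
			other => format!("{:?}", other), // the real code uses `Display`
		};
		RpcError { code: 1, message }
	}
}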
pub fn new( @@ -90,21 +88,19 @@ impl SyncStateRpcHandler shared_epoch_changes: SharedEpochChanges, deny_unsafe: sc_rpc_api::DenyUnsafe, ) -> Self { - Self { - chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe, - } + Self { chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe } } fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; - let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? + let finalized_header = self + .client + .header(BlockId::Hash(finalized_hash))? .ok_or_else(|| sp_blockchain::Error::MissingHeader(finalized_hash.to_string()))?; - let finalized_block_weight = sc_consensus_babe::aux_schema::load_block_weight( - &*self.client, - finalized_hash, - )? - .ok_or_else(|| Error::LoadingBlockWeightFailed(finalized_hash))?; + let finalized_block_weight = + sc_consensus_babe::aux_schema::load_block_weight(&*self.client, finalized_hash)? + .ok_or_else(|| Error::LoadingBlockWeightFailed(finalized_hash))?; Ok(sc_chain_spec::LightSyncState { finalized_block_header: finalized_header, @@ -116,26 +112,23 @@ impl SyncStateRpcHandler } impl SyncStateRpcApi for SyncStateRpcHandler - where - TBl: BlockT, - TCl: HeaderBackend + sc_client_api::AuxStore + 'static, +where + TBl: BlockT, + TCl: HeaderBackend + sc_client_api::AuxStore + 'static, { - fn system_gen_sync_spec(&self, raw: bool) - -> jsonrpc_core::Result - { + fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Err(err.into()); + return Err(err.into()) } let mut chain_spec = self.chain_spec.cloned_box(); - let sync_state = self.build_sync_state() - .map_err(map_error::>)?; + let sync_state = self.build_sync_state().map_err(map_error::>)?; chain_spec.set_light_sync_state(sync_state.to_serializable()); - let string = chain_spec.as_json(raw).map_err(map_error::)?; + let string = chain_spec.as_json(raw).map_err(map_error::)?; - serde_json::from_str(&string).map_err(|err| map_error::(err)) + serde_json::from_str(&string).map_err(|err| map_error::(err)) } } diff --git a/substrate/client/telemetry/src/endpoints.rs b/substrate/client/telemetry/src/endpoints.rs index fe4fa23974a647041ec53d35d49d0c05c51d397a..62e61803119808d99e0096123921797258c44622 100644 --- a/substrate/client/telemetry/src/endpoints.rs +++ b/substrate/client/telemetry/src/endpoints.rs @@ -25,8 +25,7 @@ use serde::{Deserialize, Deserializer, Serialize}; /// The URL string can be either a URL or a multiaddress. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct TelemetryEndpoints( - #[serde(deserialize_with = "url_or_multiaddr_deser")] - pub(crate) Vec<(Multiaddr, u8)>, + #[serde(deserialize_with = "url_or_multiaddr_deser")] pub(crate) Vec<(Multiaddr, u8)>, ); /// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. @@ -36,21 +35,15 @@ where { Vec::<(String, u8)>::deserialize(deserializer)? .iter() - .map(|e| { - url_to_multiaddr(&e.0) - .map_err(serde::de::Error::custom) - .map(|m| (m, e.1)) - }) + .map(|e| url_to_multiaddr(&e.0).map_err(serde::de::Error::custom).map(|m| (m, e.1))) .collect() } impl TelemetryEndpoints { /// Create a `TelemetryEndpoints` based on a list of `(String, u8)`. 
pub fn new(endpoints: Vec<(String, u8)>) -> Result { - let endpoints: Result, libp2p::multiaddr::Error> = endpoints - .iter() - .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) - .collect(); + let endpoints: Result, libp2p::multiaddr::Error> = + endpoints.iter().map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))).collect(); endpoints.map(Self) } } @@ -72,7 +65,7 @@ fn url_to_multiaddr(url: &str) -> Result { // If not, try the `ws://path/url` format. if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma); + return Ok(ma) } // If we have no clue about the format of that string, assume that we were expecting a @@ -82,8 +75,7 @@ fn url_to_multiaddr(url: &str) -> Result { #[cfg(test)] mod tests { - use super::url_to_multiaddr; - use super::TelemetryEndpoints; + use super::{url_to_multiaddr, TelemetryEndpoints}; use libp2p::Multiaddr; #[test] @@ -96,10 +88,7 @@ mod tests { TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); let mut res: Vec<(Multiaddr, u8)> = vec![]; for (a, b) in endp.iter() { - res.push(( - url_to_multiaddr(a).expect("provided url should be valid"), - *b, - )) + res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b)) } assert_eq!(telem.0, res); } diff --git a/substrate/client/telemetry/src/lib.rs b/substrate/client/telemetry/src/lib.rs index 842d89d7edf070cfab914a615ec0b0d6c38bf15b..5bd839e07495213c85ab5fd0505ceb6aafb852e0 100644 --- a/substrate/client/telemetry/src/lib.rs +++ b/substrate/client/telemetry/src/lib.rs @@ -41,8 +41,10 @@ use libp2p::Multiaddr; use log::{error, warn}; use parking_lot::Mutex; use serde::Serialize; -use std::collections::HashMap; -use std::sync::{atomic, Arc}; +use std::{ + collections::HashMap, + sync::{atomic, Arc}, +}; pub use libp2p::wasm_ext::ExtTransport; pub use log; @@ -191,11 +193,7 @@ impl TelemetryWorker { let input = input.expect("the stream is never closed; qed"); match input { - Register::Telemetry { - id, - endpoints, - connection_message, - } => { + Register::Telemetry { id, endpoints, connection_message } => { let endpoints = endpoints.0; let connection_message = match serde_json::to_value(&connection_message) { @@ -205,10 +203,10 @@ impl TelemetryWorker { obj.insert("id".to_string(), id.into()); obj.insert("payload".to_string(), value.into()); Some(obj) - } + }, Ok(_) => { unreachable!("ConnectionMessage always serialize to an object; qed") - } + }, Err(err) => { log::error!( target: "telemetry", @@ -216,7 +214,7 @@ impl TelemetryWorker { err, ); None - } + }, }; for (addr, verbosity) in endpoints { @@ -225,10 +223,7 @@ impl TelemetryWorker { "Initializing telemetry for: {:?}", addr, ); - node_map - .entry(id.clone()) - .or_default() - .push((verbosity, addr.clone())); + node_map.entry(id.clone()).or_default().push((verbosity, addr.clone())); let node = node_pool.entry(addr.clone()).or_insert_with(|| { Node::new(transport.clone(), addr.clone(), Vec::new(), Vec::new()) @@ -238,32 +233,27 @@ impl TelemetryWorker { pending_connection_notifications.retain(|(addr_b, connection_message)| { if *addr_b == addr { - node.telemetry_connection_notifier - .push(connection_message.clone()); + node.telemetry_connection_notifier.push(connection_message.clone()); false } else { true } }); } - } - Register::Notifier { - addresses, - connection_notifier, - } => { + }, + Register::Notifier { addresses, connection_notifier } => { for addr in addresses { // If the Node has been initialized, we directly push the connection_notifier. 
// Otherwise we push it to a queue that will be consumed when the connection // initializes, thus ensuring that the connection notifier will be sent to the // Node when it becomes available. if let Some(node) = node_pool.get_mut(&addr) { - node.telemetry_connection_notifier - .push(connection_notifier.clone()); + node.telemetry_connection_notifier.push(connection_notifier.clone()); } else { pending_connection_notifications.push((addr, connection_notifier.clone())); } } - } + }, } } @@ -297,12 +287,12 @@ impl TelemetryWorker { message, )), ); - return; + return }; for (node_max_verbosity, addr) in nodes { if verbosity > *node_max_verbosity { - continue; + continue } if let Some(node) = node_pool.get_mut(&addr) { @@ -376,11 +366,7 @@ impl Telemetry { let endpoints = self.endpoints.take().ok_or_else(|| Error::TelemetryAlreadyInitialized)?; self.register_sender - .unbounded_send(Register::Telemetry { - id: self.id, - endpoints, - connection_message, - }) + .unbounded_send(Register::Telemetry { id: self.id, endpoints, connection_message }) .map_err(|_| Error::TelemetryWorkerDropped) } @@ -407,12 +393,8 @@ pub struct TelemetryHandle { impl TelemetryHandle { /// Send telemetry messages. pub fn send_telemetry(&self, verbosity: VerbosityLevel, payload: TelemetryPayload) { - match self - .message_sender - .lock() - .try_send((self.id, verbosity, payload)) - { - Ok(()) => {} + match self.message_sender.lock().try_send((self.id, verbosity, payload)) { + Ok(()) => {}, Err(err) if err.is_full() => log::trace!( target: "telemetry", "Telemetry channel full.", @@ -461,15 +443,8 @@ impl TelemetryConnectionNotifier { #[derive(Debug)] enum Register { - Telemetry { - id: Id, - endpoints: TelemetryEndpoints, - connection_message: ConnectionMessage, - }, - Notifier { - addresses: Vec, - connection_notifier: ConnectionNotifierSender, - }, + Telemetry { id: Id, endpoints: TelemetryEndpoints, connection_message: ConnectionMessage }, + Notifier { addresses: Vec, connection_notifier: ConnectionNotifierSender }, } /// Report a telemetry. diff --git a/substrate/client/telemetry/src/node.rs b/substrate/client/telemetry/src/node.rs index 9ac7ada4e5d6602c310b91eac67a0f909a52b1c8..9e5738cb847730190554b8f90398e372c4cc4e2e 100644 --- a/substrate/client/telemetry/src/node.rs +++ b/substrate/client/telemetry/src/node.rs @@ -17,12 +17,15 @@ // along with this program. If not, see . 
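// A minimal, self-contained sketch of the pending-notifier pattern the worker
// above relies on. `String` and `u32` are hypothetical stand-ins for
// `Multiaddr` and `ConnectionNotifierSender`: a notifier is pushed directly
// when its node is already initialized, and queued otherwise.

use std::collections::HashMap;

#[derive(Default)]
struct NotifierPool {
    /// Initialized nodes, keyed by address, with their notifiers.
    nodes: HashMap<String, Vec<u32>>,
    /// Notifiers registered before their node was initialized.
    pending: Vec<(String, u32)>,
}

impl NotifierPool {
    fn register_notifier(&mut self, addr: String, notifier: u32) {
        match self.nodes.get_mut(&addr) {
            Some(node) => node.push(notifier),
            None => self.pending.push((addr, notifier)),
        }
    }

    fn init_node(&mut self, addr: String) {
        let node = self.nodes.entry(addr.clone()).or_default();
        // Consume any notifiers that were queued before the node existed.
        self.pending.retain(|(a, n)| {
            if *a == addr {
                node.push(*n);
                false
            } else {
                true
            }
        });
    }
}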
use crate::TelemetryPayload; -use futures::channel::mpsc; -use futures::prelude::*; -use libp2p::core::transport::Transport; -use libp2p::Multiaddr; +use futures::{channel::mpsc, prelude::*}; +use libp2p::{core::transport::Transport, Multiaddr}; use rand::Rng as _; -use std::{fmt, mem, pin::Pin, task::Context, task::Poll, time::Duration}; +use std::{ + fmt, mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Delay; pub(crate) type ConnectionNotifierSender = mpsc::Sender<()>; @@ -122,7 +125,7 @@ where ) -> Poll> { while let Some(item) = conn.buf.pop() { if let Err(e) = conn.sink.start_send_unpin(item) { - return Poll::Ready(Err(e)); + return Poll::Ready(Err(e)) } futures::ready!(conn.sink.poll_ready_unpin(cx))?; } @@ -152,25 +155,25 @@ where Poll::Ready(Err(err)) => { log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); socket = NodeSocket::wait_reconnect(); - } + }, Poll::Ready(Ok(())) => { self.socket = NodeSocket::Connected(conn); - return Poll::Ready(Ok(())); - } + return Poll::Ready(Ok(())) + }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending; - } + return Poll::Pending + }, } - } + }, Poll::Ready(Err(err)) => { log::warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); socket = NodeSocket::wait_reconnect(); - } + }, Poll::Pending => { self.socket = NodeSocket::Connected(conn); - return Poll::Pending; - } + return Poll::Pending + }, }, NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { Poll::Ready(Ok(sink)) => { @@ -201,39 +204,39 @@ where err, ); None - } + }, }) .collect(); socket = NodeSocket::Connected(NodeSocketConnected { sink, buf }); - } + }, Poll::Pending => break NodeSocket::Dialing(s), Poll::Ready(Err(err)) => { log::warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); socket = NodeSocket::wait_reconnect(); - } + }, }, NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { Ok(d) => { log::trace!(target: "telemetry", "Re-dialing {}", self.addr); socket = NodeSocket::Dialing(d); - } + }, Err(err) => { log::warn!(target: "telemetry", "❌ Error while re-dialing {}: {:?}", self.addr, err); socket = NodeSocket::wait_reconnect(); - } + }, }, NodeSocket::WaitingReconnect(mut s) => { if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { socket = NodeSocket::ReconnectNow; } else { - break NodeSocket::WaitingReconnect(s); + break NodeSocket::WaitingReconnect(s) } - } + }, NodeSocket::Poisoned => { log::error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - break NodeSocket::Poisoned; - } + break NodeSocket::Poisoned + }, } }; @@ -250,7 +253,7 @@ where Ok(data) => { log::trace!(target: "telemetry", "Sending {} bytes", data.len()); let _ = conn.sink.start_send_unpin(data); - } + }, Err(err) => log::debug!( target: "telemetry", "Could not serialize payload: {}", @@ -262,7 +265,7 @@ where // A new connection should be started as soon as possible. NodeSocket::ReconnectNow => log::trace!(target: "telemetry", "Reconnecting"), // Waiting before attempting to dial again. - NodeSocket::WaitingReconnect(_) => {} + NodeSocket::WaitingReconnect(_) => {}, // Temporary transition state. 
NodeSocket::Poisoned => log::trace!(target: "telemetry", "Poisoned"), } @@ -280,7 +283,7 @@ where log::trace!(target: "telemetry", "[poll_flush] Error: {:?}", e); self.socket = NodeSocket::wait_reconnect(); Poll::Ready(Ok(())) - } + }, Poll::Ready(Ok(())) => Poll::Ready(Ok(())), Poll::Pending => Poll::Pending, }, diff --git a/substrate/client/telemetry/src/transport.rs b/substrate/client/telemetry/src/transport.rs index 0aed263a7275d9695aed310377b80c2785c03f74..2c309be0ffb68d5c8764d7ea05ee6ebf279f7d2b 100644 --- a/substrate/client/telemetry/src/transport.rs +++ b/substrate/client/telemetry/src/transport.rs @@ -26,9 +26,7 @@ use libp2p::{ core::transport::{timeout::TransportTimeout, OptionalTransport}, wasm_ext, Transport, }; -use std::io; -use std::pin::Pin; -use std::time::Duration; +use std::{io, pin::Pin, time::Duration}; /// Timeout after which a connection attempt is considered failed. Includes the WebSocket HTTP /// upgrading. @@ -111,7 +109,7 @@ impl Stream for StreamSink { Ok(n) => { buf.truncate(n); Poll::Ready(Some(Ok(buf))) - } + }, Err(err) => Poll::Ready(Some(Err(err))), } } @@ -126,7 +124,7 @@ impl StreamSink { log::error!(target: "telemetry", "Detected some internal buffering happening in the telemetry"); let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)); + return Poll::Ready(Err(err)) } } diff --git a/substrate/client/tracing/proc-macro/src/lib.rs b/substrate/client/tracing/proc-macro/src/lib.rs index 7022d394ed9545d75ff71371038da6d5ecf0fbda..e9a4f58705b41ed412756e6212b6dca38e96e001 100644 --- a/substrate/client/tracing/proc-macro/src/lib.rs +++ b/substrate/client/tracing/proc-macro/src/lib.rs @@ -113,7 +113,7 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { "missing argument: name of the node. Example: sc_cli::prefix_logs_with()", ) .to_compile_error() - .into(); + .into() } let name = syn::parse_macro_input!(arg as Expr); @@ -124,12 +124,7 @@ pub fn prefix_logs_with(arg: TokenStream, item: TokenStream) -> TokenStream { Err(e) => return Error::new(Span::call_site(), e).to_compile_error().into(), }; - let ItemFn { - attrs, - vis, - sig, - block, - } = item_fn; + let ItemFn { attrs, vis, sig, block } = item_fn; (quote! { #(#attrs)* diff --git a/substrate/client/tracing/src/block/mod.rs b/substrate/client/tracing/src/block/mod.rs index cd5cf1052004bc960f0aee9f5725819cd0a2b664..57d648619fbeceded47537913737556da235f234 100644 --- a/substrate/client/tracing/src/block/mod.rs +++ b/substrate/client/tracing/src/block/mod.rs @@ -16,23 +16,34 @@ //! 
Utilities for tracing block execution -use std::{collections::HashMap, sync::{Arc, atomic::{AtomicU64, Ordering}}, time::Instant}; +use std::{ + collections::HashMap, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + time::Instant, +}; use parking_lot::Mutex; -use tracing::{Dispatch, dispatcher, Subscriber, Level, span::{Attributes, Record, Id}}; +use tracing::{ + dispatcher, + span::{Attributes, Id, Record}, + Dispatch, Level, Subscriber, +}; +use crate::{SpanDatum, TraceEvent, Values}; use sc_client_api::BlockBackend; use sc_rpc_server::RPC_MAX_PAYLOAD_DEFAULT; -use sp_api::{Core, Metadata, ProvideRuntimeApi, Encode}; +use sp_api::{Core, Encode, Metadata, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; +use sp_core::hexdisplay::HexDisplay; +use sp_rpc::tracing::{BlockTrace, Span, TraceBlockResponse, TraceError}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header}, }; -use sp_rpc::tracing::{BlockTrace, Span, TraceError, TraceBlockResponse}; use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; -use sp_core::hexdisplay::HexDisplay; -use crate::{SpanDatum, TraceEvent, Values}; // Heuristic for average event size in bytes. const AVG_EVENT: usize = 600 * 8; @@ -53,7 +64,7 @@ const BASE_PAYLOAD: usize = 100; const DEFAULT_TARGETS: &str = "pallet,frame,state"; const TRACE_TARGET: &str = "block_trace"; // The name of a field required for all events. -const REQUIRED_EVENT_FIELD: &str = "method"; +const REQUIRED_EVENT_FIELD: &str = "method"; const MEGABYTE: usize = 1024 * 1024; /// Tracing Block Result type alias @@ -69,7 +80,7 @@ pub enum Error { #[error("Missing block component: {0}")] MissingBlockComponent(String), #[error("Dispatch error: {0}")] - Dispatch(String) + Dispatch(String), } struct BlockSubscriber { @@ -82,10 +93,7 @@ struct BlockSubscriber { impl BlockSubscriber { fn new(targets: &str) -> Self { let next_id = AtomicU64::new(1); - let mut targets: Vec<_> = targets - .split(',') - .map(crate::parse_target) - .collect(); + let mut targets: Vec<_> = targets.split(',').map(crate::parse_target).collect(); // Ensure that WASM traces are always enabled // Filtering happens when decoding the actual target / level targets.push((WASM_TRACE_IDENTIFIER.to_owned(), Level::TRACE)); @@ -101,11 +109,11 @@ impl BlockSubscriber { impl Subscriber for BlockSubscriber { fn enabled(&self, metadata: &tracing::Metadata<'_>) -> bool { if !metadata.is_span() && !metadata.fields().field(REQUIRED_EVENT_FIELD).is_some() { - return false; + return false } for (target, level) in &self.targets { if metadata.level() <= level && metadata.target().starts_with(target) { - return true; + return true } } false @@ -125,7 +133,7 @@ impl Subscriber for BlockSubscriber { line: attrs.metadata().line().unwrap_or(0), start_time: Instant::now(), values, - overall_time: Default::default() + overall_time: Default::default(), }; self.spans.lock().insert(id.clone(), span); @@ -158,11 +166,9 @@ impl Subscriber for BlockSubscriber { self.events.lock().push(trace_event); } - fn enter(&self, _id: &Id) { - } + fn enter(&self, _id: &Id) {} - fn exit(&self, _span: &Id) { - } + fn exit(&self, _span: &Id) {} } /// Holds a reference to the client in order to execute the given block. 
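// Worked example of the size heuristic these constants feed into (see the
// `approx_payload_size` computation further below). AVG_SPAN is elided by
// this hunk, so 100 * 8 bytes per span is an assumption used only for the
// arithmetic here:
//
//     estimate = BASE_PAYLOAD + events * AVG_EVENT + spans * AVG_SPAN
//              = 100 + 2_000 * 4_800 + 1_000 * 800
//              = 10_400_100 bytes
//
// With `rpc_max_payload` of 15 megabytes (15 * 1024 * 1024 = 15_728_640
// bytes), such a trace fits and a `BlockTrace` is returned instead of a
// `TraceError`.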
@@ -179,11 +185,15 @@ pub struct BlockExecutor { } impl BlockExecutor - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockBackend + ProvideRuntimeApi - + Send + Sync + 'static, - Client::Api: Metadata, +where + Block: BlockT + 'static, + Client: HeaderBackend + + BlockBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { /// Create a new `BlockExecutor` pub fn new( @@ -193,7 +203,8 @@ impl BlockExecutor storage_keys: Option, rpc_max_payload: Option, ) -> Self { - let rpc_max_payload = rpc_max_payload.map(|mb| mb.saturating_mul(MEGABYTE)) + let rpc_max_payload = rpc_max_payload + .map(|mb| mb.saturating_mul(MEGABYTE)) .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); Self { client, block, targets, storage_keys, rpc_max_payload } } @@ -205,10 +216,14 @@ impl BlockExecutor tracing::debug!(target: "state_tracing", "Tracing block: {}", self.block); // Prepare the block let id = BlockId::Hash(self.block); - let mut header = self.client.header(id) + let mut header = self + .client + .header(id) .map_err(|e| Error::InvalidBlockId(e))? .ok_or_else(|| Error::MissingBlockComponent("Header not found".to_string()))?; - let extrinsics = self.client.block_body(&id) + let extrinsics = self + .client + .block_body(&id) .map_err(|e| Error::InvalidBlockId(e))? .ok_or_else(|| Error::MissingBlockComponent("Extrinsics not found".to_string()))?; tracing::debug!(target: "state_tracing", "Found {} extrinsics", extrinsics.len()); @@ -231,45 +246,46 @@ impl BlockExecutor ); let _guard = dispatcher_span.enter(); if let Err(e) = dispatcher::with_default(&dispatch, || { - let span = tracing::info_span!( - target: TRACE_TARGET, - "trace_block", - ); + let span = tracing::info_span!(target: TRACE_TARGET, "trace_block",); let _enter = span.enter(); self.client.runtime_api().execute_block(&parent_id, block) }) { - return Err(Error::Dispatch(format!("Failed to collect traces and execute block: {:?}", e).to_string())); + return Err(Error::Dispatch( + format!("Failed to collect traces and execute block: {:?}", e).to_string(), + )) } } - let block_subscriber = dispatch.downcast_ref::() - .ok_or(Error::Dispatch( - "Cannot downcast Dispatch to BlockSubscriber after tracing block".to_string() + let block_subscriber = + dispatch.downcast_ref::().ok_or(Error::Dispatch( + "Cannot downcast Dispatch to BlockSubscriber after tracing block".to_string(), ))?; - let spans: Vec<_> = block_subscriber.spans + let spans: Vec<_> = block_subscriber + .spans .lock() .drain() // Patch wasm identifiers .filter_map(|(_, s)| patch_and_filter(SpanDatum::from(s), targets)) .collect(); - let events: Vec<_> = block_subscriber.events + let events: Vec<_> = block_subscriber + .events .lock() .drain(..) 
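// Keep only events whose "key" value matches the requested storage-key
// filter; when no `storage_keys` filter was supplied, every event is dropped.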
- .filter(|e| self.storage_keys - .as_ref() - .map(|keys| event_key_filter(e, keys)) - .unwrap_or(false) - ) + .filter(|e| { + self.storage_keys + .as_ref() + .map(|keys| event_key_filter(e, keys)) + .unwrap_or(false) + }) .map(|s| s.into()) .collect(); tracing::debug!(target: "state_tracing", "Captured {} spans and {} events", spans.len(), events.len()); let approx_payload_size = BASE_PAYLOAD + events.len() * AVG_EVENT + spans.len() * AVG_SPAN; let response = if approx_payload_size > self.rpc_max_payload { - TraceBlockResponse::TraceError(TraceError { - error: - "Payload likely exceeds max payload size of RPC server.".to_string() - }) + TraceBlockResponse::TraceError(TraceError { + error: "Payload likely exceeds max payload size of RPC server.".to_string(), + }) } else { TraceBlockResponse::BlockTrace(BlockTrace { block_hash: block_id_as_string(id), @@ -286,14 +302,16 @@ impl BlockExecutor } fn event_key_filter(event: &TraceEvent, storage_keys: &str) -> bool { - event.values.string_values.get("key") + event + .values + .string_values + .get("key") .and_then(|key| Some(check_target(storage_keys, key, &event.level))) .unwrap_or(false) } /// Filter out spans that do not match our targets and if the span is from WASM update its `name` /// and `target` fields to the WASM values for those fields. -// // The `tracing` crate requires trace metadata to be static. This does not work for wasm code in // substrate, as it is regularly updated with new code from on-chain events. The workaround for this // is for substrate's WASM tracing wrappers to put the `name` and `target` data in the `values` map @@ -310,7 +328,7 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { span.target = t; } if !check_target(targets, &span.target, &span.level) { - return None; + return None } } Some(span.into()) @@ -320,15 +338,15 @@ fn patch_and_filter(mut span: SpanDatum, targets: &str) -> Option { fn check_target(targets: &str, target: &str, level: &Level) -> bool { for (t, l) in targets.split(',').map(crate::parse_target) { if target.starts_with(t.as_str()) && level <= &l { - return true; + return true } } false } fn block_id_as_string(block_id: BlockId) -> String { - match block_id { + match block_id { BlockId::Hash(h) => HexDisplay::from(&h.encode()).to_string(), - BlockId::Number(n) => HexDisplay::from(&n.encode()).to_string() + BlockId::Number(n) => HexDisplay::from(&n.encode()).to_string(), } } diff --git a/substrate/client/tracing/src/lib.rs b/substrate/client/tracing/src/lib.rs index 9f02bb96e4f77527caaa7921810739fc63902873..bf6e3d780c6ed9014d7aa60adaead6d172d8745f 100644 --- a/substrate/client/tracing/src/lib.rs +++ b/substrate/client/tracing/src/lib.rs @@ -34,8 +34,10 @@ pub mod logging; use rustc_hash::FxHashMap; use serde::ser::{Serialize, SerializeMap, Serializer}; use sp_tracing::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; -use std::fmt; -use std::time::{Duration, Instant}; +use std::{ + fmt, + time::{Duration, Instant}, +}; use tracing::{ event::Event, field::{Field, Visit}, @@ -43,8 +45,10 @@ use tracing::{ subscriber::Subscriber, Level, }; -use tracing_subscriber::layer::{Context, Layer}; -use tracing_subscriber::registry::LookupSpan; +use tracing_subscriber::{ + layer::{Context, Layer}, + registry::LookupSpan, +}; #[doc(hidden)] pub use tracing; @@ -137,10 +141,10 @@ impl Values { /// Checks if all individual collections are empty pub fn is_empty(&self) -> bool { - self.bool_values.is_empty() - && self.i64_values.is_empty() - && self.u64_values.is_empty() - && 
self.string_values.is_empty() + self.bool_values.is_empty() && + self.i64_values.is_empty() && + self.u64_values.is_empty() && + self.string_values.is_empty() } } @@ -162,15 +166,20 @@ impl Visit for Values { } fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { - self.string_values.insert(field.name().to_string(), format!("{:?}", value).to_owned()); + self.string_values + .insert(field.name().to_string(), format!("{:?}", value).to_owned()); } } impl Serialize for Values { fn serialize(&self, serializer: S) -> Result - where S: Serializer, + where + S: Serializer, { - let len = self.bool_values.len() + self.i64_values.len() + self.u64_values.len() + self.string_values.len(); + let len = self.bool_values.len() + + self.i64_values.len() + + self.u64_values.len() + + self.string_values.len(); let mut map = serializer.serialize_map(Some(len))?; for (k, v) in &self.bool_values { map.serialize_entry(k, v)?; @@ -194,7 +203,12 @@ impl fmt::Display for Values { let i64_iter = self.i64_values.iter().map(|(k, v)| format!("{}={}", k, v)); let u64_iter = self.u64_values.iter().map(|(k, v)| format!("{}={}", k, v)); let string_iter = self.string_values.iter().map(|(k, v)| format!("{}=\"{}\"", k, v)); - let values = bool_iter.chain(i64_iter).chain(u64_iter).chain(string_iter).collect::>().join(", "); + let values = bool_iter + .chain(i64_iter) + .chain(u64_iter) + .chain(string_iter) + .collect::>() + .join(", "); write!(f, "{}", values) } } @@ -217,16 +231,13 @@ impl ProfilingLayer { /// wasm_tracing indicates whether to enable wasm traces pub fn new_with_handler(trace_handler: Box, targets: &str) -> Self { let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); - Self { - targets, - trace_handler, - } + Self { targets, trace_handler } } fn check_target(&self, target: &str, level: &Level) -> bool { for t in &self.targets { if target.starts_with(t.0.as_str()) && level <= &t.1 { - return true; + return true } } false @@ -245,8 +256,8 @@ fn parse_target(s: &str) -> (String, Level) { } else { (target, Level::TRACE) } - } - None => (s.to_string(), Level::TRACE) + }, + None => (s.to_string(), Level::TRACE), } } @@ -329,10 +340,7 @@ where if let Some(mut span_datum) = extensions.remove::() { span_datum.overall_time += end_time - span_datum.start_time; if span_datum.name == WASM_TRACE_IDENTIFIER { - span_datum - .values - .bool_values - .insert("wasm".to_owned(), true); + span_datum.values.bool_values.insert("wasm".to_owned(), true); if let Some(n) = span_datum.values.string_values.remove(WASM_NAME_KEY) { span_datum.name = n; } @@ -404,13 +412,11 @@ impl TraceHandler for LogTraceHandler { impl From for sp_rpc::tracing::Event { fn from(trace_event: TraceEvent) -> Self { - let data = sp_rpc::tracing::Data { - string_values: trace_event.values.string_values - }; + let data = sp_rpc::tracing::Data { string_values: trace_event.values.string_values }; sp_rpc::tracing::Event { target: trace_event.target, data, - parent_id: trace_event.parent_id.map(|id| id.into_u64()) + parent_id: trace_event.parent_id.map(|id| id.into_u64()), } } } @@ -453,18 +459,12 @@ mod tests { fn setup_subscriber() -> ( impl tracing::Subscriber + Send + Sync, Arc>>, - Arc>> + Arc>>, ) { let spans = Arc::new(Mutex::new(Vec::new())); let events = Arc::new(Mutex::new(Vec::new())); - let handler = TestTraceHandler { - spans: spans.clone(), - events: events.clone(), - }; - let layer = ProfilingLayer::new_with_handler( - Box::new(handler), - "test_target", - ); + let handler = TestTraceHandler { spans: 
spans.clone(), events: events.clone() }; + let layer = ProfilingLayer::new_with_handler(Box::new(handler), "test_target"); let subscriber = tracing_subscriber::fmt().with_writer(std::io::sink).finish().with(layer); (subscriber, spans, events) } @@ -542,7 +542,10 @@ mod tests { let _sub_guard = tracing::subscriber::set_default(sub); tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); let mut te1 = events.lock().remove(0); - assert_eq!(te1.values.string_values.remove(&"message".to_owned()).unwrap(), "test_event".to_owned()); + assert_eq!( + te1.values.string_values.remove(&"message".to_owned()).unwrap(), + "test_event".to_owned() + ); } #[test] @@ -557,7 +560,7 @@ mod tests { // emit event tracing::event!(target: "test_target", tracing::Level::INFO, "test_event"); - //exit span + // exit span drop(_guard1); drop(span1); @@ -596,7 +599,7 @@ mod tests { tracing::event!(target: "test_target", tracing::Level::INFO, "test_event1"); for msg in rx.recv() { if msg == false { - break; + break } } // guard2 and span2 dropped / exited diff --git a/substrate/client/tracing/src/logging/directives.rs b/substrate/client/tracing/src/logging/directives.rs index 0e6d949a41391bf721e34854525c8ec5519ad8bf..5aaeb4d17e7d332ccc8c02793a5e3cf9c293ac9c 100644 --- a/substrate/client/tracing/src/logging/directives.rs +++ b/substrate/client/tracing/src/logging/directives.rs @@ -63,12 +63,7 @@ pub fn reload_filter() -> Result<(), String> { let mut env_filter = EnvFilter::default(); if let Some(current_directives) = CURRENT_DIRECTIVES.get() { // Use join and then split in case any directives added together - for directive in current_directives - .lock() - .join(",") - .split(',') - .map(|d| d.parse()) - { + for directive in current_directives.lock().join(",").split(',').map(|d| d.parse()) { match directive { Ok(dir) => env_filter = env_filter.add_directive(dir), Err(invalid_directive) => { @@ -77,7 +72,7 @@ pub fn reload_filter() -> Result<(), String> { "Unable to parse directive while setting log filter: {:?}", invalid_directive, ); - } + }, } } } @@ -99,14 +94,9 @@ pub fn reload_filter() -> Result<(), String> { /// /// Includes substrate defaults and CLI supplied directives. 
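// Hedged illustration of the join-then-split normalization in `reload_filter`
// above (the helper is hypothetical; the real code `parse()`s each piece and
// feeds it to `EnvFilter::add_directive`):
//
//     fn normalized(directives: &[String]) -> Vec<String> {
//         directives.join(",").split(',').map(|d| d.to_string()).collect()
//     }
//
//     // normalized(&["a=trace".into(), "b=debug,c=info".into()])
//     // == ["a=trace", "b=debug", "c=info"]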
pub fn reset_log_filter() -> Result<(), String> { - let directive = DEFAULT_DIRECTIVES - .get_or_init(|| Mutex::new(Vec::new())) - .lock() - .clone(); + let directive = DEFAULT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock().clone(); - *CURRENT_DIRECTIVES - .get_or_init(|| Mutex::new(Vec::new())) - .lock() = directive; + *CURRENT_DIRECTIVES.get_or_init(|| Mutex::new(Vec::new())).lock() = directive; reload_filter() } diff --git a/substrate/client/tracing/src/logging/event_format.rs b/substrate/client/tracing/src/logging/event_format.rs index 5e7a5246cca00d8cbd38426f186a1d1a9e3a5205..01847bc2b5cb488c76813f216adc946336eb009a 100644 --- a/substrate/client/tracing/src/logging/event_format.rs +++ b/substrate/client/tracing/src/logging/event_format.rs @@ -79,11 +79,11 @@ where match current_thread.name() { Some(name) => { write!(writer, "{} ", FmtThreadName::new(name))?; - } + }, // fall-back to thread id when name is absent and ids are not enabled None => { write!(writer, "{:0>2?} ", current_thread.id())?; - } + }, } } @@ -98,7 +98,7 @@ where let exts = span.extensions(); if let Some(prefix) = exts.get::() { write!(writer, "{}", prefix.as_str())?; - break; + break } } } @@ -125,11 +125,11 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { - if self.dup_to_stdout && ( - event.metadata().level() == &Level::INFO || - event.metadata().level() == &Level::WARN || - event.metadata().level() == &Level::ERROR - ) { + if self.dup_to_stdout && + (event.metadata().level() == &Level::INFO || + event.metadata().level() == &Level::WARN || + event.metadata().level() == &Level::ERROR) + { let mut out = String::new(); self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; writer.write_str(&out)?; @@ -271,9 +271,8 @@ where ) -> fmt::Result { match self { CustomFmtContext::FmtContext(fmt_ctx) => fmt_ctx.format_fields(writer, fields), - CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => { - fmt_fields.format_fields(writer, fields) - } + CustomFmtContext::ContextWithFormatFields(_ctx, fmt_fields) => + fmt_fields.format_fields(writer, fields), } } } @@ -321,11 +320,7 @@ impl<'a> fmt::Write for MaybeColorWriter<'a> { impl<'a> MaybeColorWriter<'a> { /// Creates a new instance. fn new(enable_color: bool, inner_writer: &'a mut dyn fmt::Write) -> Self { - Self { - enable_color, - inner_writer, - buffer: String::new(), - } + Self { enable_color, inner_writer, buffer: String::new() } } /// Write the buffered content to the `inner_writer`. diff --git a/substrate/client/tracing/src/logging/layers/console_log.rs b/substrate/client/tracing/src/logging/layers/console_log.rs index be992ae814235b9dfa6e16e7e33c75ca3aa6140c..77295110c896216552b94d2202931ea0e1c10fb5 100644 --- a/substrate/client/tracing/src/logging/layers/console_log.rs +++ b/substrate/client/tracing/src/logging/layers/console_log.rs @@ -40,11 +40,7 @@ pub struct ConsoleLogLayer ConsoleLogLayer { /// Create a new [`ConsoleLogLayer`] using the `EventFormat` provided in argument. 
pub fn new(event_format: EventFormat) -> Self { - Self { - event_format, - fmt_fields: Default::default(), - _inner: std::marker::PhantomData, - } + Self { event_format, fmt_fields: Default::default(), _inner: std::marker::PhantomData } } } @@ -90,11 +86,11 @@ where Ok(buf) => { a = buf; &mut *a - } + }, _ => { b = String::new(); &mut b - } + }, }; if self.format_event(&ctx, &mut buf, event).is_ok() { diff --git a/substrate/client/tracing/src/logging/layers/prefix_layer.rs b/substrate/client/tracing/src/logging/layers/prefix_layer.rs index f35b59e8b9af900716e8a3e92f4122725f86105c..2ad786a0922335bcbb4521656da2c95c089198a2 100644 --- a/substrate/client/tracing/src/logging/layers/prefix_layer.rs +++ b/substrate/client/tracing/src/logging/layers/prefix_layer.rs @@ -42,12 +42,12 @@ where "newly created span with ID {:?} did not exist in the registry; this is a bug!", id ); - return; - } + return + }, }; if span.name() != PREFIX_LOG_SPAN { - return; + return } let mut extensions = span.extensions_mut(); diff --git a/substrate/client/tracing/src/logging/mod.rs b/substrate/client/tracing/src/logging/mod.rs index a3fa3a531b3e4f78cbe9857ba7ddd564367dcae5..3d3b40a14d9fabd71755f9a0c02108bb97425f97 100644 --- a/substrate/client/tracing/src/logging/mod.rs +++ b/substrate/client/tracing/src/logging/mod.rs @@ -33,10 +33,9 @@ use std::io; use tracing::Subscriber; use tracing_subscriber::{ filter::LevelFilter, - fmt::time::ChronoLocal, fmt::{ - format, FormatEvent, FormatFields, Formatter, Layer as FmtLayer, MakeWriter, - SubscriberBuilder, + format, time::ChronoLocal, FormatEvent, FormatFields, Formatter, Layer as FmtLayer, + MakeWriter, SubscriberBuilder, }, layer::{self, SubscriberExt}, registry::LookupSpan, @@ -153,9 +152,7 @@ where let max_level_hint = Layer::::max_level_hint(&env_filter); let max_level = to_log_level_filter(max_level_hint); - tracing_log::LogTracer::builder() - .with_max_level(max_level) - .init()?; + tracing_log::LogTracer::builder().with_max_level(max_level).init()?; // If we're only logging `INFO` entries then we'll use a simplified logging format. let simple = match max_level_hint { @@ -276,23 +273,19 @@ impl LoggerBuilder { } } else { if self.log_reloading { - let subscriber = prepare_subscriber( - &self.directives, - None, - self.force_colors, - |builder| enable_log_reloading!(builder), - )?; + let subscriber = + prepare_subscriber(&self.directives, None, self.force_colors, |builder| { + enable_log_reloading!(builder) + })?; tracing::subscriber::set_global_default(subscriber)?; Ok(()) } else { - let subscriber = prepare_subscriber( - &self.directives, - None, - self.force_colors, - |builder| builder, - )?; + let subscriber = + prepare_subscriber(&self.directives, None, self.force_colors, |builder| { + builder + })?; tracing::subscriber::set_global_default(subscriber)?; @@ -410,12 +403,7 @@ mod tests { .unwrap(); let output = String::from_utf8(output.stderr).unwrap(); - assert!( - re.is_match(output.trim()), - "Expected:\n{}\nGot:\n{}", - re, - output, - ); + assert!(re.is_match(output.trim()), "Expected:\n{}\nGot:\n{}", re, output,); } /// This is not an actual test, it is used by the `prefix_in_log_lines` test. 
@@ -460,12 +448,7 @@ mod tests { .unwrap(); let output = String::from_utf8(output.stderr).unwrap(); - assert!( - re.is_match(output.trim()), - "Expected:\n{}\nGot:\n{}", - re, - output, - ); + assert!(re.is_match(output.trim()), "Expected:\n{}\nGot:\n{}", re, output,); } #[test] @@ -503,18 +486,9 @@ mod tests { eprint!("MAX_LOG_LEVEL={:?}", log::max_level()); } else { assert_eq!("MAX_LOG_LEVEL=Info", run_test(None, None)); - assert_eq!( - "MAX_LOG_LEVEL=Trace", - run_test(Some("test=trace".into()), None) - ); - assert_eq!( - "MAX_LOG_LEVEL=Debug", - run_test(Some("test=debug".into()), None) - ); - assert_eq!( - "MAX_LOG_LEVEL=Trace", - run_test(None, Some("test=info".into())) - ); + assert_eq!("MAX_LOG_LEVEL=Trace", run_test(Some("test=trace".into()), None)); + assert_eq!("MAX_LOG_LEVEL=Debug", run_test(Some("test=debug".into()), None)); + assert_eq!("MAX_LOG_LEVEL=Trace", run_test(None, Some("test=info".into()))); } } } diff --git a/substrate/client/transaction-pool/api/src/error.rs b/substrate/client/transaction-pool/api/src/error.rs index dd2d6401c1821887837076294b968114dbb164fa..365d6a28d6b9b28f5aeeb4e383d86d3ca176154f 100644 --- a/substrate/client/transaction-pool/api/src/error.rs +++ b/substrate/client/transaction-pool/api/src/error.rs @@ -18,7 +18,7 @@ //! Transaction pool errors. use sp_runtime::transaction_validity::{ - TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, + InvalidTransaction, TransactionPriority as Priority, UnknownTransaction, }; /// Transaction pool result. @@ -52,7 +52,7 @@ pub enum Error { /// Transaction already in the pool. old: Priority, /// Transaction entering the pool. - new: Priority + new: Priority, }, #[error("Transaction with cyclic dependency")] CycleDetected, @@ -78,9 +78,13 @@ pub trait IntoPoolError: std::error::Error + Send + Sized { /// This implementation is optional and used only to /// provide more descriptive error messages for end users /// of RPC API. - fn into_pool_error(self) -> std::result::Result { Err(self) } + fn into_pool_error(self) -> std::result::Result { + Err(self) + } } impl IntoPoolError for Error { - fn into_pool_error(self) -> std::result::Result { Ok(self) } + fn into_pool_error(self) -> std::result::Result { + Ok(self) + } } diff --git a/substrate/client/transaction-pool/api/src/lib.rs b/substrate/client/transaction-pool/api/src/lib.rs index 198d67f71d1b54cafafb569691a51be8ea7d2614..eb9b1b09b899f91d8745829ccd787f72ba13a776 100644 --- a/substrate/client/transaction-pool/api/src/lib.rs +++ b/substrate/client/transaction-pool/api/src/lib.rs @@ -20,21 +20,16 @@ pub mod error; -use std::{ - collections::HashMap, - hash::Hash, - sync::Arc, - pin::Pin, -}; use futures::{Future, Stream}; use serde::{Deserialize, Serialize}; +pub use sp_runtime::transaction_validity::{ + TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, +}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Member, NumberFor}, }; -pub use sp_runtime::transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, -}; +use std::{collections::HashMap, hash::Hash, pin::Pin, sync::Arc}; /// Transaction pool status. #[derive(Debug)] @@ -63,20 +58,20 @@ impl PoolStatus { /// /// The status events can be grouped based on their kinds as: /// 1. Entering/Moving within the pool: -/// - `Future` -/// - `Ready` +/// - `Future` +/// - `Ready` /// 2. Inside `Ready` queue: -/// - `Broadcast` +/// - `Broadcast` /// 3. 
Leaving the pool: -/// - `InBlock` -/// - `Invalid` -/// - `Usurped` -/// - `Dropped` -/// 4. Re-entering the pool: -/// - `Retracted` -/// 5. Block finalized: -/// - `Finalized` -/// - `FinalityTimeout` +/// - `InBlock` +/// - `Invalid` +/// - `Usurped` +/// - `Dropped` +/// 4. Re-entering the pool: +/// - `Retracted` +/// 5. Block finalized: +/// - `Finalized` +/// - `FinalityTimeout` /// /// The events will always be received in the order described above, however /// there might be cases where transactions alternate between `Future` and `Ready` @@ -130,7 +125,8 @@ pub enum TransactionStatus { } /// The stream of transaction events. -pub type TransactionStatusStream = dyn Stream> + Send + Unpin; +pub type TransactionStatusStream = + dyn Stream> + Send + Unpin; /// The import notification event stream. pub type ImportNotificationStream = futures::channel::mpsc::Receiver; @@ -147,7 +143,7 @@ pub type TransactionStatusStreamFor

<P> = TransactionStatusStream<TxHash<P>, BlockHash<P>>;

pub type LocalTransactionFor<P> = <<P as LocalTransactionPool>

::Block as BlockT>::Extrinsic; /// Typical future type used in transaction pool api. -pub type PoolFuture = std::pin::Pin> + Send>>; +pub type PoolFuture = std::pin::Pin> + Send>>; /// In-pool transaction interface. /// @@ -184,7 +180,7 @@ pub trait TransactionPool: Send + Sync { /// In-pool transaction type. type InPoolTransaction: InPoolTransaction< Transaction = TransactionFor, - Hash = TxHash + Hash = TxHash, >; /// Error type. type Error: From + crate::error::IntoPoolError; @@ -220,11 +216,18 @@ pub trait TransactionPool: Send + Sync { /// /// Guarantees to return only when transaction pool got updated at `at` block. /// Guarantees to return immediately when `None` is passed. - fn ready_at(&self, at: NumberFor) - -> Pin> + Send>> + Send>>; + fn ready_at( + &self, + at: NumberFor, + ) -> Pin< + Box< + dyn Future> + Send>> + + Send, + >, + >; /// Get an iterator for ready transactions ordered by priority. - fn ready(&self) -> Box> + Send>; + fn ready(&self) -> Box> + Send>; // *** Block production /// Remove transactions identified by given hashes (and dependent transactions) from the pool. @@ -270,7 +273,7 @@ pub enum ChainEvent { /// Trait for transaction pool maintenance. pub trait MaintainedTransactionPool: TransactionPool { /// Perform maintenance - fn maintain(&self, event: ChainEvent) -> Pin + Send>>; + fn maintain(&self, event: ChainEvent) -> Pin + Send>>; } /// Transaction pool interface for submitting local transactions that exposes a @@ -306,11 +309,7 @@ pub trait OffchainSubmitTransaction: Send + Sync { /// Submit transaction. /// /// The transaction will end up in the pool and be propagated to others. - fn submit_at( - &self, - at: &BlockId, - extrinsic: Block::Extrinsic, - ) -> Result<(), ()>; + fn submit_at(&self, at: &BlockId, extrinsic: Block::Extrinsic) -> Result<(), ()>; } impl OffchainSubmitTransaction for TPool { diff --git a/substrate/client/transaction-pool/benches/basics.rs b/substrate/client/transaction-pool/benches/basics.rs index 6995491ea22c4ee6895ae560812d735ec70c9ca7..cf30a0200ad760a5d207b8fc7de476a682af7577 100644 --- a/substrate/client/transaction-pool/benches/basics.rs +++ b/substrate/client/transaction-pool/benches/basics.rs @@ -18,18 +18,22 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use futures::{future::{ready, Ready}, executor::block_on}; -use sc_transaction_pool::{*, test_helpers::*}; use codec::Encode; -use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; +use futures::{ + executor::block_on, + future::{ready, Ready}, +}; +use sc_transaction_pool::{test_helpers::*, *}; +use sp_core::blake2_256; use sp_runtime::{ - generic::BlockId, traits::Block as BlockT, + generic::BlockId, + traits::Block as BlockT, transaction_validity::{ - ValidTransaction, InvalidTransaction, TransactionValidity, TransactionTag as Tag, - TransactionSource, + InvalidTransaction, TransactionSource, TransactionTag as Tag, TransactionValidity, + ValidTransaction, }, }; -use sp_core::blake2_256; +use substrate_test_runtime::{AccountId, Block, Extrinsic, Transfer, H256}; #[derive(Clone, Debug, Default)] struct TestApi { @@ -65,25 +69,21 @@ impl ChainApi for TestApi { let from = uxt.transfer().from.clone(); match self.block_id_to_number(at) { - Ok(Some(num)) if num > 5 => { - return ready( - Ok(Err(InvalidTransaction::Stale.into())) - ) - }, + Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), _ => {}, } - ready( - Ok(Ok(ValidTransaction { - priority: 4, - requires: if nonce > 1 && self.nonce_dependant { - 
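// In the nonce-dependent benchmark, each transaction requires the tag of the
// previous nonce, so the pool has to maintain one long ready-chain.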
vec![to_tag(nonce-1, from.clone())] - } else { vec![] }, - provides: vec![to_tag(nonce, from)], - longevity: 10, - propagate: true, - })) - ) + ready(Ok(Ok(ValidTransaction { + priority: 4, + requires: if nonce > 1 && self.nonce_dependant { + vec![to_tag(nonce - 1, from.clone())] + } else { + vec![] + }, + provides: vec![to_tag(nonce, from)], + longevity: 10, + propagate: true, + }))) } fn block_id_to_number( @@ -156,11 +156,7 @@ fn bench_configured(pool: Pool, number: u64) { // Prune all transactions. let block_num = 6; - block_on(pool.prune_tags( - &BlockId::Number(block_num), - tags, - vec![], - )).expect("Prune failed"); + block_on(pool.prune_tags(&BlockId::Number(block_num), tags, vec![])).expect("Prune failed"); // pool is empty assert_eq!(pool.validated_pool().status().ready, 0); @@ -168,7 +164,6 @@ fn bench_configured(pool: Pool, number: u64) { } fn benchmark_main(c: &mut Criterion) { - c.bench_function("sequential 50 tx", |b| { b.iter(|| { bench_configured( diff --git a/substrate/client/transaction-pool/src/api.rs b/substrate/client/transaction-pool/src/api.rs index 2eb394f76d554664fa040e5684a394a5ce323dbd..0d9ec122b87d0925cba0f03f05671bc6e8bcb49d 100644 --- a/substrate/client/transaction-pool/src/api.rs +++ b/substrate/client/transaction-pool/src/api.rs @@ -18,26 +18,35 @@ //! Chain api required for the transaction pool. -use std::{marker::PhantomData, pin::Pin, sync::Arc}; use codec::{Decode, Encode}; use futures::{ - channel::{oneshot, mpsc}, future::{Future, FutureExt, ready, Ready}, lock::Mutex, SinkExt, - StreamExt, + channel::{mpsc, oneshot}, + future::{ready, Future, FutureExt, Ready}, + lock::Mutex, + SinkExt, StreamExt, }; +use std::{marker::PhantomData, pin::Pin, sync::Arc}; +use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{ - blockchain::HeaderBackend, light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, BlockBackend, + blockchain::HeaderBackend, + light::{Fetcher, RemoteBodyRequest, RemoteCallRequest}, + BlockBackend, }; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ - generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, - transaction_validity::{TransactionValidity, TransactionSource}, + generic::BlockId, + traits::{self, Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT}, + transaction_validity::{TransactionSource, TransactionValidity}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use prometheus_endpoint::Registry as PrometheusRegistry; -use sp_core::traits::SpawnEssentialNamed; -use crate::{metrics::{ApiMetrics, ApiMetricsExt}, error::{self, Error}, graph}; +use crate::{ + error::{self, Error}, + graph, + metrics::{ApiMetrics, ApiMetricsExt}, +}; /// The transaction pool logic for full client. 
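// A minimal sketch (hypothetical names, commented out) of the validation
// offloading pattern this type uses: validations are boxed futures pushed
// into a bounded channel and drained sequentially by one essential
// background task, which bounds concurrent runtime calls:
//
//     use futures::{channel::mpsc, future::BoxFuture, StreamExt};
//
//     async fn validation_worker(mut rx: mpsc::Receiver<BoxFuture<'static, ()>>) {
//         while let Some(task) = rx.next().await {
//             task.await; // run each queued validation to completion
//         }
//     }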
pub struct FullChainApi { @@ -63,7 +72,8 @@ fn spawn_validation_pool_task( Some(task) => task.await, } } - }.boxed(), + } + .boxed(), ); } @@ -74,18 +84,16 @@ impl FullChainApi { prometheus: Option<&PrometheusRegistry>, spawner: &impl SpawnEssentialNamed, ) -> Self { - let metrics = prometheus.map(ApiMetrics::register).and_then(|r| { - match r { - Err(err) => { - log::warn!( - target: "txpool", - "Failed to register transaction pool api prometheus metrics: {:?}", - err, - ); - None - }, - Ok(api) => Some(Arc::new(api)) - } + let metrics = prometheus.map(ApiMetrics::register).and_then(|r| match r { + Err(err) => { + log::warn!( + target: "txpool", + "Failed to register transaction pool api prometheus metrics: {:?}", + err, + ); + None + }, + Ok(api) => Some(Arc::new(api)), }); let (sender, receiver) = mpsc::channel(0); @@ -106,15 +114,15 @@ impl FullChainApi { impl graph::ChainApi for FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { type Block = Block; type Error = error::Error; - type ValidationFuture = Pin< - Box> + Send> - >; + type ValidationFuture = + Pin> + Send>>; type BodyFuture = Ready::Extrinsic>>>>; fn block_body(&self, id: &BlockId) -> Self::BodyFuture { @@ -136,14 +144,16 @@ where async move { metrics.report(|m| m.validations_scheduled.inc()); - validation_pool.lock() + validation_pool + .lock() .await .send( async move { let res = validate_transaction_blocking(&*client, &at, source, uxt); let _ = tx.send(res); metrics.report(|m| m.validations_finished.inc()); - }.boxed() + } + .boxed(), ) .await .map_err(|e| Error::RuntimeApi(format!("Validation pool down: {:?}", e)))?; @@ -152,30 +162,33 @@ where Ok(r) => r, Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), } - }.boxed() + } + .boxed() } fn block_id_to_number( &self, at: &BlockId, ) -> error::Result>> { - self.client.to_number(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + self.client + .to_number(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) } fn block_id_to_hash( &self, at: &BlockId, ) -> error::Result>> { - self.client.to_hash(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + self.client + .to_hash(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) } fn hash_and_length( &self, ex: &graph::ExtrinsicFor, ) -> (graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| { - ( as traits::Hash>::hash(x), x.len()) - }) + ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) } fn block_header( @@ -196,7 +209,8 @@ fn validate_transaction_blocking( ) -> error::Result where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -257,7 +271,8 @@ where impl FullChainApi where Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, + Client: + ProvideRuntimeApi + BlockBackend + BlockIdTo + HeaderBackend, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, { @@ -285,30 +300,25 @@ pub struct LightChainApi { impl LightChainApi { /// Create new transaction pool logic. 
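// Hedged construction sketch (hypothetical variables): the light-client api
// pairs a header backend with a fetcher that performs remote validation
// calls and fetches block bodies on demand:
//
//     let api = LightChainApi::new(client.clone(), fetcher.clone());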
pub fn new(client: Arc, fetcher: Arc) -> Self { - LightChainApi { - client, - fetcher, - _phantom: Default::default(), - } + LightChainApi { client, fetcher, _phantom: Default::default() } } } -impl graph::ChainApi for - LightChainApi where - Block: BlockT, - Client: HeaderBackend + 'static, - F: Fetcher + 'static, +impl graph::ChainApi for LightChainApi +where + Block: BlockT, + Client: HeaderBackend + 'static, + F: Fetcher + 'static, { type Block = Block; type Error = error::Error; - type ValidationFuture = Box< - dyn Future> + Send + Unpin - >; + type ValidationFuture = + Box> + Send + Unpin>; type BodyFuture = Pin< Box< dyn Future::Extrinsic>>>> - + Send - > + + Send, + >, >; fn validate_transaction( @@ -318,9 +328,11 @@ impl graph::ChainApi for uxt: graph::ExtrinsicFor, ) -> Self::ValidationFuture { let header_hash = self.client.expect_block_hash_from_id(at); - let header_and_hash = header_hash - .and_then(|header_hash| self.client.expect_header(BlockId::Hash(header_hash)) - .map(|header| (header_hash, header))); + let header_and_hash = header_hash.and_then(|header_hash| { + self.client + .expect_header(BlockId::Hash(header_hash)) + .map(|header| (header_hash, header)) + }); let (block, header) = match header_and_hash { Ok((header_hash, header)) => (header_hash, header), Err(err) => return Box::new(ready(Err(err.into()))), @@ -333,13 +345,12 @@ impl graph::ChainApi for retry_count: None, }); let remote_validation_request = remote_validation_request.then(move |result| { - let result: error::Result = result - .map_err(Into::into) - .and_then(|result| Decode::decode(&mut &result[..]) - .map_err(|e| Error::RuntimeApi( - format!("Error decoding tx validation result: {:?}", e) - )) - ); + let result: error::Result = + result.map_err(Into::into).and_then(|result| { + Decode::decode(&mut &result[..]).map_err(|e| { + Error::RuntimeApi(format!("Error decoding tx validation result: {:?}", e)) + }) + }); ready(result) }); @@ -364,30 +375,26 @@ impl graph::ChainApi for &self, ex: &graph::ExtrinsicFor, ) -> (graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| { - (<::Hashing as HashT>::hash(x), x.len()) - }) + ex.using_encoded(|x| (<::Hashing as HashT>::hash(x), x.len())) } fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - let header = self.client.header(*id) + let header = self + .client + .header(*id) .and_then(|h| h.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))); let header = match header { Ok(header) => header, Err(err) => { log::warn!(target: "txpool", "Failed to query header: {:?}", err); - return Box::pin(ready(Ok(None))); - } + return Box::pin(ready(Ok(None))) + }, }; let fetcher = self.fetcher.clone(); async move { - let transactions = fetcher.remote_body({ - RemoteBodyRequest { - header, - retry_count: None, - } - }) + let transactions = fetcher + .remote_body(RemoteBodyRequest { header, retry_count: None }) .await .unwrap_or_else(|e| { log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e); @@ -395,7 +402,8 @@ impl graph::ChainApi for }); Ok(Some(transactions)) - }.boxed() + } + .boxed() } fn block_header( diff --git a/substrate/client/transaction-pool/src/error.rs b/substrate/client/transaction-pool/src/error.rs index 23afab0c74a7bcd6ff64894705f5784cb01aad99..b14e0569f0830b537061bf4089965cec157d0e1c 100644 --- a/substrate/client/transaction-pool/src/error.rs +++ b/substrate/client/transaction-pool/src/error.rs @@ -40,7 +40,6 @@ pub enum Error { RuntimeApi(String), } - impl sc_transaction_pool_api::error::IntoPoolError for Error { fn 
into_pool_error(self) -> std::result::Result { match self { diff --git a/substrate/client/transaction-pool/src/graph/base_pool.rs b/substrate/client/transaction-pool/src/graph/base_pool.rs index db5927ea0c9981c3c9b9ec09644434e927e1c4ff..86433bea492859c52aed9f934f2c52b480ac64aa 100644 --- a/substrate/client/transaction-pool/src/graph/base_pool.rs +++ b/substrate/client/transaction-pool/src/graph/base_pool.rs @@ -20,24 +20,19 @@ //! //! For a more full-featured pool, have a look at the `pool` module. -use std::{ - collections::HashSet, - fmt, - hash, - sync::Arc, -}; +use std::{collections::HashSet, fmt, hash, sync::Arc}; -use log::{trace, debug, warn}; +use log::{debug, trace, warn}; +use sc_transaction_pool_api::{error, InPoolTransaction, PoolStatus}; use serde::Serialize; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::traits::Member; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, - TransactionLongevity as Longevity, - TransactionPriority as Priority, - TransactionSource as Source, +use sp_runtime::{ + traits::Member, + transaction_validity::{ + TransactionLongevity as Longevity, TransactionPriority as Priority, + TransactionSource as Source, TransactionTag as Tag, + }, }; -use sc_transaction_pool_api::{error, PoolStatus, InPoolTransaction}; use super::{ future::{FutureTransactions, WaitingTransaction}, @@ -62,7 +57,7 @@ pub enum Imported { Future { /// Hash of transaction that was successfully imported. hash: Hash, - } + }, } impl Imported { @@ -133,7 +128,7 @@ impl InPoolTransaction for Transaction { &self.priority } - fn longevity(&self) ->&Longevity { + fn longevity(&self) -> &Longevity { &self.valid_till } @@ -171,13 +166,17 @@ impl Transaction { } } -impl fmt::Debug for Transaction where +impl fmt::Debug for Transaction +where Hash: fmt::Debug, Extrinsic: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { let join_tags = |tags: &[Tag]| { - tags.iter().map(|tag| HexDisplay::from(tag).to_string()).collect::>().join(", ") + tags.iter() + .map(|tag| HexDisplay::from(tag).to_string()) + .collect::>() + .join(", ") }; write!(fmt, "Transaction {{ ")?; @@ -245,7 +244,10 @@ impl BasePool(&mut self, closure: impl FnOnce(&mut Self, bool) -> T) -> T { + pub(crate) fn with_futures_enabled( + &mut self, + closure: impl FnOnce(&mut Self, bool) -> T, + ) -> T { let previous = self.reject_future_transactions; self.reject_future_transactions = false; let return_value = closure(self, previous); @@ -265,19 +267,12 @@ impl BasePool, - ) -> error::Result> { + pub fn import(&mut self, tx: Transaction) -> error::Result> { if self.is_imported(&tx.hash) { return Err(error::Error::AlreadyImported(Box::new(tx.hash))) } - let tx = WaitingTransaction::new( - tx, - self.ready.provided_tags(), - &self.recently_pruned, - ); + let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); debug!( target: "txpool", @@ -289,12 +284,12 @@ impl BasePool BasePool) -> error::Result> { + fn import_to_ready( + &mut self, + tx: WaitingTransaction, + ) -> error::Result> { let hash = tx.transaction.hash.clone(); let mut promoted = vec![]; let mut failed = vec![]; @@ -328,12 +326,13 @@ impl BasePool if first { - debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e) - } else { - failed.push(current_hash); - }, + Err(e) => + if first { + debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); + return Err(e) + } else { + 
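// Only the initially imported transaction may abort the import with an
// error; failures while promoting dependent transactions are just recorded.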
failed.push(current_hash); + }, } first = false; } @@ -352,21 +351,16 @@ impl BasePool impl Iterator>> { + pub fn ready(&self) -> impl Iterator>> { self.ready.get() } /// Returns an iterator over future transactions in the pool. - pub fn futures(&self) -> impl Iterator> { + pub fn futures(&self) -> impl Iterator> { self.future.all() } @@ -378,11 +372,7 @@ impl BasePool BasePool Vec>> { + pub fn enforce_limits( + &mut self, + ready: &Limit, + future: &Limit, + ) -> Vec>> { let mut removed = vec![]; while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { // find the worst transaction - let minimal = self.ready - .fold(|minimal, current| { - let transaction = ¤t.transaction; - match minimal { - None => Some(transaction.clone()), - Some(ref tx) if tx.insertion_id > transaction.insertion_id => { - Some(transaction.clone()) - }, - other => other, - } - }); + let minimal = self.ready.fold(|minimal, current| { + let transaction = ¤t.transaction; + match minimal { + None => Some(transaction.clone()), + Some(ref tx) if tx.insertion_id > transaction.insertion_id => + Some(transaction.clone()), + other => other, + } + }); if let Some(minimal) = minimal { removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) } else { - break; + break } } while future.is_exceeded(self.future.len(), self.future.bytes()) { // find the worst transaction - let minimal = self.future - .fold(|minimal, current| { - match minimal { - None => Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => { - Some(current.clone()) - }, - other => other, - } - }); + let minimal = self.future.fold(|minimal, current| match minimal { + None => Some(current.clone()), + Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), + other => other, + }); if let Some(minimal) = minimal { removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) } else { - break; + break } } @@ -467,7 +454,7 @@ impl BasePool) -> PruneStatus { + pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { let mut to_import = vec![]; let mut pruned = vec![]; let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; @@ -496,11 +483,7 @@ impl BasePool> = Transaction { + const DEFAULT_TX: Transaction> = Transaction { data: vec![], bytes: 1, hash: 1u64, @@ -558,11 +541,8 @@ mod tests { let mut pool = pool(); // when - pool.import(Transaction { - data: vec![1u8], - provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); // then assert_eq!(pool.ready().count(), 1); @@ -575,16 +555,10 @@ mod tests { let mut pool = pool(); // when - pool.import(Transaction { - data: vec![1u8], - provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap_err(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap_err(); // then assert_eq!(pool.ready().count(), 1); @@ -601,16 +575,18 @@ mod tests { data: vec![1u8], requires: vec![vec![0]], provides: vec![vec![1]], - .. 
DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); pool.import(Transaction { data: vec![2u8], hash: 2, provides: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then assert_eq!(pool.ready().count(), 2); @@ -627,37 +603,43 @@ mod tests { data: vec![1u8], requires: vec![vec![0]], provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], hash: 3, requires: vec![vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![2u8], hash: 2, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![4]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); - let res = pool.import(Transaction { - data: vec![5u8], - hash: 5, - provides: vec![vec![0], vec![4]], - .. DEFAULT_TX.clone() - }).unwrap(); + let res = pool + .import(Transaction { + data: vec![5u8], + hash: 5, + provides: vec![vec![0], vec![4]], + ..DEFAULT_TX.clone() + }) + .unwrap(); // then let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); @@ -668,12 +650,15 @@ mod tests { assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 5, - promoted: vec![1, 2, 3, 4], - failed: vec![], - removed: vec![], - }); + assert_eq!( + res, + Imported::Ready { + hash: 5, + promoted: vec![1, 2, 3, 4], + failed: vec![], + removed: vec![], + } + ); } #[test] @@ -684,15 +669,17 @@ mod tests { data: vec![1u8], requires: vec![vec![0]], provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -702,8 +689,9 @@ mod tests { hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then { @@ -714,24 +702,24 @@ mod tests { assert_eq!(pool.future.len(), 3); // let's close the cycle with one additional transaction - let res = pool.import(Transaction { - data: vec![4u8], - hash: 4, - priority: 50u64, - provides: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap(); + let res = pool + .import(Transaction { + data: vec![4u8], + hash: 4, + priority: 50u64, + provides: vec![vec![0]], + ..DEFAULT_TX.clone() + }) + .unwrap(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 4, - promoted: vec![1, 3], - failed: vec![2], - removed: vec![], - }); + assert_eq!( + res, + Imported::Ready { hash: 4, promoted: vec![1, 3], failed: vec![2], removed: vec![] } + ); assert_eq!(pool.future.len(), 0); } @@ -743,15 +731,17 @@ mod tests { data: vec![1u8], requires: vec![vec![0]], provides: vec![vec![1]], - .. 
DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -761,8 +751,9 @@ mod tests { hash: 2, requires: vec![vec![2]], provides: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then { @@ -773,13 +764,15 @@ mod tests { assert_eq!(pool.future.len(), 3); // let's close the cycle with one additional transaction - let err = pool.import(Transaction { - data: vec![4u8], - hash: 4, - priority: 1u64, // lower priority than Tx(2) - provides: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap_err(); + let err = pool + .import(Transaction { + data: vec![4u8], + hash: 4, + priority: 1u64, // lower priority than Tx(2) + provides: vec![vec![0]], + ..DEFAULT_TX.clone() + }) + .unwrap_err(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), None); assert_eq!(pool.ready.len(), 0); @@ -797,14 +790,16 @@ mod tests { data: vec![5u8; 1024], hash: 5, provides: vec![vec![0], vec![4]], - .. DEFAULT_TX.clone() - }).expect("import 1 should be ok"); + ..DEFAULT_TX.clone() + }) + .expect("import 1 should be ok"); pool.import(Transaction { data: vec![3u8; 1024], hash: 7, provides: vec![vec![2], vec![7]], - .. DEFAULT_TX.clone() - }).expect("import 2 should be ok"); + ..DEFAULT_TX.clone() + }) + .expect("import 2 should be ok"); assert!(parity_util_mem::malloc_size(&pool) > 5000); } @@ -817,42 +812,48 @@ mod tests { data: vec![5u8], hash: 5, provides: vec![vec![0], vec![4]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![1u8], requires: vec![vec![0]], provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], hash: 3, requires: vec![vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![2u8], hash: 2, requires: vec![vec![1]], provides: vec![vec![3], vec![2]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![4]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // future pool.import(Transaction { data: vec![6u8], hash: 6, priority: 1_000u64, requires: vec![vec![11]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 5); assert_eq!(pool.future.len(), 1); @@ -874,36 +875,37 @@ mod tests { hash: 5, requires: vec![vec![0]], provides: vec![vec![100]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // ready - pool.import(Transaction { - data: vec![1u8], - provides: vec![vec![1]], - .. DEFAULT_TX.clone() - }).unwrap(); + pool.import(Transaction { data: vec![1u8], provides: vec![vec![1]], ..DEFAULT_TX.clone() }) + .unwrap(); pool.import(Transaction { data: vec![2u8], hash: 2, requires: vec![vec![2]], provides: vec![vec![3]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![3u8], hash: 3, requires: vec![vec![1]], provides: vec![vec![2]], - .. 
DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); pool.import(Transaction { data: vec![4u8], hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); assert_eq!(pool.ready().count(), 4); assert_eq!(pool.future.len(), 1); @@ -914,12 +916,10 @@ mod tests { // then assert_eq!(result.pruned.len(), 2); assert_eq!(result.failed.len(), 0); - assert_eq!(result.promoted[0], Imported::Ready { - hash: 5, - promoted: vec![], - failed: vec![], - removed: vec![], - }); + assert_eq!( + result.promoted[0], + Imported::Ready { hash: 5, promoted: vec![], failed: vec![], removed: vec![] } + ); assert_eq!(result.promoted.len(), 1); assert_eq!(pool.future.len(), 0); assert_eq!(pool.ready.len(), 3); @@ -929,40 +929,52 @@ mod tests { #[test] fn transaction_debug() { assert_eq!( - format!("{:?}", Transaction { - data: vec![4u8], - hash: 4, - priority: 1_000u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - .. DEFAULT_TX.clone() - }), + format!( + "{:?}", + Transaction { + data: vec![4u8], + hash: 4, + priority: 1_000u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + ..DEFAULT_TX.clone() + } + ), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}".to_owned() +source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}" + .to_owned() ); } #[test] fn transaction_propagation() { - assert_eq!(Transaction { + assert_eq!( + Transaction { data: vec![4u8], hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], - .. DEFAULT_TX.clone() - }.is_propagable(), true); + ..DEFAULT_TX.clone() + } + .is_propagable(), + true + ); - assert_eq!(Transaction { + assert_eq!( + Transaction { data: vec![4u8], hash: 4, priority: 1_000u64, requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: false, - .. DEFAULT_TX.clone() - }.is_propagable(), false); + ..DEFAULT_TX.clone() + } + .is_propagable(), + false + ); } #[test] @@ -978,7 +990,7 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ data: vec![5u8], hash: 5, requires: vec![vec![0]], - .. DEFAULT_TX.clone() + ..DEFAULT_TX.clone() }); if let Err(error::Error::RejectedFutureTransaction) = err { @@ -997,8 +1009,9 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ data: vec![5u8], hash: 5, requires: vec![vec![0]], - .. DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); // then assert_eq!(pool.future.len(), 1); @@ -1022,8 +1035,9 @@ source: TransactionSource::External, requires: [03, 02], provides: [04], data: [ data: vec![5u8], hash: 5, requires: vec![vec![0]], - .. 
DEFAULT_TX.clone() - }).unwrap(); + ..DEFAULT_TX.clone() + }) + .unwrap(); flag }); diff --git a/substrate/client/transaction-pool/src/graph/future.rs b/substrate/client/transaction-pool/src/graph/future.rs index 083d3c7ec0613963550d458dcc499dd9dfa1bc7a..b0e70698f3832f377e7d0ff3b2b64a10d5123d4d 100644 --- a/substrate/client/transaction-pool/src/graph/future.rs +++ b/substrate/client/transaction-pool/src/graph/future.rs @@ -18,15 +18,12 @@ use std::{ collections::{HashMap, HashSet}, - fmt, - hash, + fmt, hash, sync::Arc, }; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; +use sp_runtime::transaction_validity::TransactionTag as Tag; use wasm_timer::Instant; use super::base_pool::Transaction; @@ -48,10 +45,13 @@ impl fmt::Debug for WaitingTransaction>().join(", "), + fmt, + "missing_tags: {{{}}}", + self.missing_tags + .iter() + .map(|tag| HexDisplay::from(tag).to_string()) + .collect::>() + .join(", "), )?; write!(fmt, "}}") } @@ -77,22 +77,20 @@ impl WaitingTransaction { provided: &HashMap, recently_pruned: &[HashSet], ) -> Self { - let missing_tags = transaction.requires + let missing_tags = transaction + .requires .iter() .filter(|tag| { // is true if the tag is already satisfied either via transaction in the pool // or one that was recently included. - let is_provided = provided.contains_key(&**tag) || recently_pruned.iter().any(|x| x.contains(&**tag)); + let is_provided = provided.contains_key(&**tag) || + recently_pruned.iter().any(|x| x.contains(&**tag)); !is_provided }) .cloned() .collect(); - Self { - transaction: Arc::new(transaction), - missing_tags, - imported_at: Instant::now(), - } + Self { transaction: Arc::new(transaction), missing_tags, imported_at: Instant::now() } } /// Marks the tag as satisfied. @@ -121,10 +119,7 @@ pub struct FutureTransactions { impl Default for FutureTransactions { fn default() -> Self { - Self { - wanted_tags: Default::default(), - waiting: Default::default(), - } + Self { wanted_tags: Default::default(), waiting: Default::default() } } } @@ -144,7 +139,10 @@ impl FutureTransactions { /// we should remove the transactions from here and move them to the Ready queue. pub fn import(&mut self, tx: WaitingTransaction) { assert!(!tx.is_ready(), "Transaction is ready."); - assert!(!self.waiting.contains_key(&tx.transaction.hash), "Transaction is already imported."); + assert!( + !self.waiting.contains_key(&tx.transaction.hash), + "Transaction is already imported." + ); // Add all tags that are missing for tag in &tx.missing_tags { @@ -163,14 +161,20 @@ impl FutureTransactions { /// Returns a list of known transactions pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { - hashes.iter().map(|h| self.waiting.get(h).map(|x| x.transaction.clone())).collect() + hashes + .iter() + .map(|h| self.waiting.get(h).map(|x| x.transaction.clone())) + .collect() } /// Satisfies provided tags in transactions that are waiting for them. /// /// Returns (and removes) transactions that became ready after their last tag got /// satisfied and now we can remove them from Future and move to Ready queue. 
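// A standalone sketch of the `satisfy_tags` contract described above: remove each
// newly provided tag from every waiting transaction's `missing_tags`, then hand back
// (and drop) the transactions that ended up with none left. Toy `u64` hashes and
// `Vec<u8>` tags, hypothetical types rather than the pool's own — the real code also
// keeps a reverse `wanted_tags` index so it only visits affected transactions:
use std::collections::{HashMap, HashSet};

struct Waiting {
    hash: u64,
    missing_tags: HashSet<Vec<u8>>,
}

fn satisfy_tags(waiting: &mut HashMap<u64, Waiting>, tags: &[Vec<u8>]) -> Vec<u64> {
    for tag in tags {
        for w in waiting.values_mut() {
            // The tag is now provided, so nothing needs to wait for it any more.
            w.missing_tags.remove(tag);
        }
    }
    // Whatever has no missing tags left became ready: remove and return it.
    let ready: Vec<u64> =
        waiting.values().filter(|w| w.missing_tags.is_empty()).map(|w| w.hash).collect();
    for hash in &ready {
        waiting.remove(hash);
    }
    ready
}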
- pub fn satisfy_tags>(&mut self, tags: impl IntoIterator) -> Vec> { + pub fn satisfy_tags>( + &mut self, + tags: impl IntoIterator, + ) -> Vec> { let mut became_ready = vec![]; for tag in tags { @@ -205,7 +209,9 @@ impl FutureTransactions { let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { wanted.remove(hash); wanted.is_empty() - } else { false }; + } else { + false + }; if remove { self.wanted_tags.remove(&tag); } @@ -218,14 +224,15 @@ impl FutureTransactions { } /// Fold a list of future transactions to compute a single value. - pub fn fold, &WaitingTransaction) -> Option>(&mut self, f: F) -> Option { - self.waiting - .values() - .fold(None, f) + pub fn fold, &WaitingTransaction) -> Option>( + &mut self, + f: F, + ) -> Option { + self.waiting.values().fold(None, f) } /// Returns iterator over all future transactions - pub fn all(&self) -> impl Iterator> { + pub fn all(&self) -> impl Iterator> { self.waiting.values().map(|waiting| &*waiting.transaction) } @@ -265,7 +272,8 @@ mod tests { provides: vec![vec![3], vec![4]], propagate: true, source: TransactionSource::External, - }.into(), + } + .into(), missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), imported_at: std::time::Instant::now(), }); diff --git a/substrate/client/transaction-pool/src/graph/listener.rs b/substrate/client/transaction-pool/src/graph/listener.rs index a6987711f1dfbc1f9a9dd31dcfcb1a5434dc3fb2..b8149018f78365183de3d82f79d5237767c8d067 100644 --- a/substrate/client/transaction-pool/src/graph/listener.rs +++ b/substrate/client/transaction-pool/src/graph/listener.rs @@ -1,4 +1,3 @@ - // This file is part of Substrate. // Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. @@ -17,16 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{ - collections::HashMap, hash, fmt::Debug, -}; +use std::{collections::HashMap, fmt::Debug, hash}; use linked_hash_map::LinkedHashMap; -use serde::Serialize; use log::{debug, trace}; +use serde::Serialize; use sp_runtime::traits; -use super::{watcher, ChainApi, ExtrinsicHash, BlockHash}; +use super::{watcher, BlockHash, ChainApi, ExtrinsicHash}; /// Extrinsic pool default listener. 
pub struct Listener { @@ -39,15 +36,15 @@ const MAX_FINALITY_WATCHERS: usize = 512; impl Default for Listener { fn default() -> Self { - Self { - watchers: Default::default(), - finality_watchers: Default::default(), - } + Self { watchers: Default::default(), finality_watchers: Default::default() } } } impl Listener { - fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender>) { + fn fire(&mut self, hash: &H, fun: F) + where + F: FnOnce(&mut watcher::Sender>), + { let clean = if let Some(h) = self.watchers.get_mut(hash) { fun(h); h.is_done() diff --git a/substrate/client/transaction-pool/src/graph/mod.rs b/substrate/client/transaction-pool/src/graph/mod.rs index 92e76b3ecf90b98b2b22cdb1568b66ce90abd19f..3ecfb8fe68c609b901bb19654498f89d54ba723b 100644 --- a/substrate/client/transaction-pool/src/graph/mod.rs +++ b/substrate/client/transaction-pool/src/graph/mod.rs @@ -31,15 +31,17 @@ mod listener; mod pool; mod ready; mod rotator; -mod validated_pool; mod tracked_map; +mod validated_pool; pub mod base_pool; pub mod watcher; -pub use self::base_pool::Transaction; -pub use validated_pool::{IsValidator, ValidatedTransaction}; -pub use self::pool::{ - BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, - NumberFor, Options, Pool, TransactionFor, +pub use self::{ + base_pool::Transaction, + pool::{ + BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, NumberFor, Options, Pool, + TransactionFor, + }, }; +pub use validated_pool::{IsValidator, ValidatedTransaction}; diff --git a/substrate/client/transaction-pool/src/graph/pool.rs b/substrate/client/transaction-pool/src/graph/pool.rs index 2c24f3779f0e976a4f116a1f8c51296e4c78dc77..c04c167bc750f658c86f29f291dcb2629af9441a 100644 --- a/substrate/client/transaction-pool/src/graph/pool.rs +++ b/substrate/client/transaction-pool/src/graph/pool.rs @@ -16,26 +16,23 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::{ - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; -use futures::Future; +use futures::{channel::mpsc::Receiver, Future}; +use sc_transaction_pool_api::error; use sp_runtime::{ generic::BlockId, - traits::{self, SaturatedConversion, Block as BlockT}, + traits::{self, Block as BlockT, SaturatedConversion}, transaction_validity::{ - TransactionValidity, TransactionTag as Tag, TransactionValidityError, TransactionSource, + TransactionSource, TransactionTag as Tag, TransactionValidity, TransactionValidityError, }, }; -use sc_transaction_pool_api::error; use wasm_timer::Instant; -use futures::channel::mpsc::Receiver; use super::{ - base_pool as base, watcher::Watcher, - validated_pool::{IsValidator, ValidatedTransaction, ValidatedPool}, + base_pool as base, + validated_pool::{IsValidator, ValidatedPool, ValidatedTransaction}, + watcher::Watcher, }; /// Modification notification event stream type; @@ -52,11 +49,8 @@ pub type NumberFor = traits::NumberFor<::Block>; /// A type of transaction stored in the pool pub type TransactionFor = Arc, ExtrinsicFor>>; /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExtrinsicHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; /// Concrete extrinsic validation and query logic. pub trait ChainApi: Send + Sync { @@ -65,11 +59,12 @@ pub trait ChainApi: Send + Sync { /// Error type. 
type Error: From + error::IntoPoolError; /// Validate transaction future. - type ValidationFuture: Future> + Send + Unpin; + type ValidationFuture: Future> + Send + Unpin; /// Body future (since block body might be remote) - type BodyFuture: Future< - Output = Result::Extrinsic>>, Self::Error> - > + Unpin + Send + 'static; + type BodyFuture: Future::Extrinsic>>, Self::Error>> + + Unpin + + Send + + 'static; /// Verify extrinsic at given block. fn validate_transaction( @@ -118,14 +113,8 @@ pub struct Options { impl Default for Options { fn default() -> Self { Self { - ready: base::Limit { - count: 8192, - total_bytes: 20 * 1024 * 1024, - }, - future: base::Limit { - count: 512, - total_bytes: 1 * 1024 * 1024, - }, + ready: base::Limit { count: 8192, total_bytes: 20 * 1024 * 1024 }, + future: base::Limit { count: 512, total_bytes: 1 * 1024 * 1024 }, reject_future_transactions: false, } } @@ -157,9 +146,7 @@ where impl Pool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { - Self { - validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)), - } + Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)) } } /// Imports a bunch of unverified extrinsics to the pool @@ -167,7 +154,7 @@ impl Pool { &self, at: &BlockId, source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator>, ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::Yes).await?; @@ -181,7 +168,7 @@ impl Pool { &self, at: &BlockId, source: TransactionSource, - xts: impl IntoIterator>, + xts: impl IntoIterator>, ) -> Result, B::Error>>, B::Error> { let xts = xts.into_iter().map(|xt| (source, xt)); let validated_transactions = self.verify(at, xts, CheckBannedBeforeVerify::No).await?; @@ -207,13 +194,9 @@ impl Pool { xt: ExtrinsicFor, ) -> Result, ExtrinsicHash>, B::Error> { let block_number = self.resolve_block_number(at)?; - let (_, tx) = self.verify_one( - at, - block_number, - source, - xt, - CheckBannedBeforeVerify::Yes, - ).await; + let (_, tx) = self + .verify_one(at, block_number, source, xt, CheckBannedBeforeVerify::Yes) + .await; self.validated_pool.submit_and_watch(tx) } @@ -222,7 +205,6 @@ impl Pool { &self, revalidated_transactions: HashMap, ValidatedTransactionFor>, ) { - let now = Instant::now(); self.validated_pool.resubmit(revalidated_transactions); log::debug!(target: "txpool", @@ -243,13 +225,17 @@ impl Pool { hashes: &[ExtrinsicHash], ) -> Result<(), B::Error> { // Get details of all extrinsics that are already in the pool - let in_pool_tags = self.validated_pool.extrinsics_tags(hashes) - .into_iter().filter_map(|x| x).flatten(); + let in_pool_tags = self + .validated_pool + .extrinsics_tags(hashes) + .into_iter() + .filter_map(|x| x) + .flatten(); // Prune all transactions that provide given tags let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; - let pruned_transactions = hashes.iter().cloned() - .chain(prune_status.pruned.iter().map(|tx| tx.hash)); + let pruned_transactions = + hashes.iter().cloned().chain(prune_status.pruned.iter().map(|tx| tx.hash)); self.validated_pool.fire_pruned(at, pruned_transactions) } @@ -272,7 +258,8 @@ impl Pool { extrinsics.len() ); // Get details of all extrinsics that are already in the pool - let in_pool_hashes = extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::>(); + let in_pool_hashes = + 
extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::>(); let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes); // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option>)`) @@ -286,7 +273,9 @@ impl Pool { // if it's not found in the pool query the runtime at parent block // to get validity info and tags that the extrinsic provides. None => { - let validity = self.validated_pool.api() + let validity = self + .validated_pool + .api() .validate_transaction(parent, TransactionSource::InBlock, extrinsic.clone()) .await; @@ -324,8 +313,8 @@ impl Pool { pub async fn prune_tags( &self, at: &BlockId, - tags: impl IntoIterator, - known_imported_hashes: impl IntoIterator> + Clone, + tags: impl IntoIterator, + known_imported_hashes: impl IntoIterator> + Clone, ) -> Result<(), B::Error> { log::debug!(target: "txpool", "Pruning at {:?}", at); // Prune all transactions that provide given tags @@ -334,22 +323,17 @@ impl Pool { // Make sure that we don't revalidate extrinsics that were part of the recently // imported block. This is especially important for UTXO-like chains cause the // inputs are pruned so such transaction would go to future again. - self.validated_pool.ban(&Instant::now(), known_imported_hashes.clone().into_iter()); + self.validated_pool + .ban(&Instant::now(), known_imported_hashes.clone().into_iter()); // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. - let pruned_hashes = prune_status.pruned - .iter() - .map(|tx| tx.hash).collect::>(); - let pruned_transactions = prune_status.pruned - .into_iter() - .map(|tx| (tx.source, tx.data.clone())); + let pruned_hashes = prune_status.pruned.iter().map(|tx| tx.hash).collect::>(); + let pruned_transactions = + prune_status.pruned.into_iter().map(|tx| (tx.source, tx.data.clone())); - let reverified_transactions = self.verify( - at, - pruned_transactions, - CheckBannedBeforeVerify::Yes, - ).await?; + let reverified_transactions = + self.verify(at, pruned_transactions, CheckBannedBeforeVerify::Yes).await?; log::trace!(target: "txpool", "Pruning at {:?}. Resubmitting transactions.", at); // And finally - submit reverified transactions back to the pool @@ -369,16 +353,16 @@ impl Pool { /// Resolves block number by id. fn resolve_block_number(&self, at: &BlockId) -> Result, B::Error> { - self.validated_pool.api().block_id_to_number(at) - .and_then(|number| number.ok_or_else(|| - error::Error::InvalidBlockId(format!("{:?}", at)).into())) + self.validated_pool.api().block_id_to_number(at).and_then(|number| { + number.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into()) + }) } /// Returns future that validates a bunch of transactions at given block. 
async fn verify( &self, at: &BlockId, - xts: impl IntoIterator)>, + xts: impl IntoIterator)>, check: CheckBannedBeforeVerify, ) -> Result, ValidatedTransactionFor>, B::Error> { // we need a block number to compute tx validity @@ -386,8 +370,11 @@ impl Pool { let res = futures::future::join_all( xts.into_iter() - .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check)) - ).await.into_iter().collect::>(); + .map(|(source, xt)| self.verify_one(at, block_number, source, xt, check)), + ) + .await + .into_iter() + .collect::>(); Ok(res) } @@ -408,11 +395,11 @@ impl Pool { return (hash, ValidatedTransaction::Invalid(hash, err)) } - let validation_result = self.validated_pool.api().validate_transaction( - block_id, - source, - xt.clone(), - ).await; + let validation_result = self + .validated_pool + .api() + .validate_transaction(block_id, source, xt.clone()) + .await; let status = match validation_result { Ok(status) => status, @@ -420,7 +407,7 @@ impl Pool { }; let validity = match status { - Ok(validity) => { + Ok(validity) => if validity.provides.is_empty() { ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into()) } else { @@ -432,8 +419,7 @@ impl Pool { bytes, validity, ) - } - }, + }, Err(TransactionValidityError::Invalid(e)) => ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()), Err(TransactionValidityError::Unknown(e)) => @@ -444,35 +430,32 @@ impl Pool { } /// get a reference to the underlying validated pool. - pub fn validated_pool(&self) -> &ValidatedPool { + pub fn validated_pool(&self) -> &ValidatedPool { &self.validated_pool } } impl Clone for Pool { fn clone(&self) -> Self { - Self { - validated_pool: self.validated_pool.clone(), - } + Self { validated_pool: self.validated_pool.clone() } } } #[cfg(test)] mod tests { - use std::collections::{HashMap, HashSet}; - use parking_lot::Mutex; + use super::{super::base_pool::Limit, *}; + use assert_matches::assert_matches; + use codec::Encode; use futures::executor::block_on; - use super::*; + use parking_lot::Mutex; use sc_transaction_pool_api::TransactionStatus; use sp_runtime::{ traits::Hash, - transaction_validity::{ValidTransaction, InvalidTransaction, TransactionSource}, + transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; - use codec::Encode; - use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId, Hashing}; - use assert_matches::assert_matches; + use std::collections::{HashMap, HashSet}; + use substrate_test_runtime::{AccountId, Block, Extrinsic, Hashing, Transfer, H256}; use wasm_timer::Instant; - use super::super::base_pool::Limit; const INVALID_NONCE: u64 = 254; const SOURCE: TransactionSource = TransactionSource::External; @@ -522,8 +505,16 @@ mod tests { } else { let mut transaction = ValidTransaction { priority: 4, - requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, + requires: if nonce > block_number { + vec![vec![nonce as u8 - 1]] + } else { + vec![] + }, + provides: if nonce == INVALID_NONCE { + vec![] + } else { + vec![vec![nonce as u8]] + }, longevity: 3, propagate: true, }; @@ -539,15 +530,13 @@ mod tests { Ok(transaction) } }, - Extrinsic::IncludeData(_) => { - Ok(ValidTransaction { - priority: 9001, - requires: vec![], - provides: vec![vec![42]], - longevity: 9001, - propagate: false, - }) - }, + Extrinsic::IncludeData(_) => Ok(ValidTransaction { + priority: 9001, + requires: vec![], + 
provides: vec![vec![42]], + longevity: 9001, + propagate: false, + }), _ => unimplemented!(), }; @@ -613,12 +602,17 @@ mod tests { let pool = pool(); // when - let hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); // then assert_eq!(pool.validated_pool().ready().map(|v| v.hash).collect::>(), vec![hash]); @@ -673,25 +667,40 @@ mod tests { let stream = pool.validated_pool().import_notification_stream(); // when - let hash0 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let hash0 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); // future doesn't count - let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); + let _hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); assert_eq!(pool.validated_pool().status().future, 1); @@ -710,24 +719,39 @@ mod tests { fn should_clear_stale_transactions() { // given let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - let hash3 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let hash2 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + 
uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + let hash3 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); // when pool.validated_pool.clear_stale(&BlockId::Number(5)).unwrap(); @@ -746,12 +770,17 @@ mod tests { fn should_ban_mined_transactions() { // given let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); // when block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); @@ -763,34 +792,37 @@ mod tests { #[test] fn should_limit_futures() { // given - let limit = Limit { - count: 100, - total_bytes: 200, - }; + let limit = Limit { count: 100, total_bytes: 200 }; - let options = Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }; + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let pool = Pool::new(options, true.into(), TestApi::default().into()); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().future, 1); // when - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 10, - }))).unwrap(); + let hash2 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 10, + }), + )) + .unwrap(); // then assert_eq!(pool.validated_pool().status().future, 1); @@ -801,26 +833,24 @@ mod tests { #[test] fn should_error_if_reject_immediately() { // given - let limit = Limit { - count: 100, - total_bytes: 10, - }; + let limit = Limit { count: 100, total_bytes: 10 }; - let options = Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }; + let options = Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let pool = Pool::new(options, true.into(), TestApi::default().into()); // when - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap_err(); + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: 
AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap_err(); // then assert_eq!(pool.validated_pool().status().ready, 0); @@ -833,12 +863,17 @@ mod tests { let pool = pool(); // when - let err = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: INVALID_NONCE, - }))).unwrap_err(); + let err = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: INVALID_NONCE, + }), + )) + .unwrap_err(); // then assert_eq!(pool.validated_pool().status().ready, 0); @@ -853,12 +888,17 @@ mod tests { fn should_trigger_ready_and_finalized() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); @@ -880,19 +920,27 @@ mod tests { fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 0); // when - block_on( - pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![watcher.hash().clone()]), - ).unwrap(); + block_on(pool.prune_tags( + &BlockId::Number(2), + vec![vec![0u8]], + vec![watcher.hash().clone()], + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -909,22 +957,32 @@ mod tests { fn should_trigger_future_and_ready_after_promoted() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 1); // when - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), 
- amount: 5, - nonce: 0, - }))).unwrap(); + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); assert_eq!(pool.validated_pool().status().ready, 2); // then @@ -943,13 +1001,13 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when pool.validated_pool.remove_invalid(&[*watcher.hash()]); - // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -967,7 +1025,8 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -976,7 +1035,6 @@ mod tests { map.insert(*watcher.hash(), peers.clone()); pool.validated_pool().on_broadcasted(map); - // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -986,15 +1044,9 @@ mod tests { #[test] fn should_trigger_dropped() { // given - let limit = Limit { - count: 1, - total_bytes: 1000, - }; - let options = Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }; + let limit = Limit { count: 1, total_bytes: 1000 }; + let options = + Options { ready: limit.clone(), future: limit.clone(), ..Default::default() }; let pool = Pool::new(options, true.into(), TestApi::default().into()); @@ -1064,7 +1116,6 @@ mod tests { block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); assert_eq!(pool.validated_pool().status().ready, 0); - // so when we release the verification of the previous one it will have // something in `requires`, but should go to ready directly, since the previous transaction was imported // correctly. diff --git a/substrate/client/transaction-pool/src/graph/ready.rs b/substrate/client/transaction-pool/src/graph/ready.rs index 46f13f4e82dc9cd543ed03c75a2eefc855a3b8ce..ac842b99bf12cba7dbf0fc44811b5bf90ae4f5fc 100644 --- a/substrate/client/transaction-pool/src/graph/ready.rs +++ b/substrate/client/transaction-pool/src/graph/ready.rs @@ -17,19 +17,16 @@ // along with this program. If not, see . 
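// The limit handling exercised by the pool tests just above hinges on
// `Limit { count, total_bytes }`. Judging from its call sites in this diff
// (`enforce_limits`, `should_trigger_dropped`), `is_exceeded` has roughly the
// following shape — an assumption inferred from usage, not copied from the crate:
struct Limit {
    count: usize,
    total_bytes: usize,
}

impl Limit {
    // One side of the pool (ready or future) is over its limit as soon as either
    // the transaction count or the total encoded size passes the cap.
    fn is_exceeded(&self, count: usize, bytes: usize) -> bool {
        self.count < count || self.total_bytes < bytes
    }
}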
use std::{ - collections::{HashMap, HashSet, BTreeSet}, cmp, + collections::{BTreeSet, HashMap, HashSet}, hash, sync::Arc, }; -use serde::Serialize; use log::trace; -use sp_runtime::traits::Member; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; use sc_transaction_pool_api::error; +use serde::Serialize; +use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; use super::{ base_pool::Transaction, @@ -50,16 +47,15 @@ pub struct TransactionRef { impl Clone for TransactionRef { fn clone(&self) -> Self { - Self { - transaction: self.transaction.clone(), - insertion_id: self.insertion_id, - } + Self { transaction: self.transaction.clone(), insertion_id: self.insertion_id } } } impl Ord for TransactionRef { fn cmp(&self, other: &Self) -> cmp::Ordering { - self.transaction.priority.cmp(&other.transaction.priority) + self.transaction + .priority + .cmp(&other.transaction.priority) .then_with(|| other.transaction.valid_till.cmp(&self.transaction.valid_till)) .then_with(|| other.insertion_id.cmp(&self.insertion_id)) } @@ -149,7 +145,7 @@ impl ReadyTransactions { /// /// Transactions are returned in order: /// 1. First by the dependencies: - /// - never return transaction that requires a tag, which was not provided by one of the previously + /// - never return transaction that requires a tag, which was not provided by one of the previously /// returned transactions /// 2. Then by priority: /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. @@ -157,7 +153,7 @@ impl ReadyTransactions { /// - transactions that are valid for a shorter time go first /// 4. Lastly we sort by the time in the queue /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { + pub fn get(&self) -> impl Iterator>> { BestIterator { all: self.ready.clone(), best: self.best.clone(), @@ -176,9 +172,13 @@ impl ReadyTransactions { ) -> error::Result>>> { assert!( tx.is_ready(), - "Only ready transactions can be imported. Missing: {:?}", tx.missing_tags + "Only ready transactions can be imported. Missing: {:?}", + tx.missing_tags + ); + assert!( + !self.ready.read().contains_key(&tx.transaction.hash), + "Transaction is already imported." ); - assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported."); self.insertion_id += 1; let insertion_id = self.insertion_id; @@ -201,7 +201,7 @@ impl ReadyTransactions { } else { requires_offset += 1; } - } + } // update provided_tags // call to replace_previous guarantees that we will be overwriting @@ -210,10 +210,7 @@ impl ReadyTransactions { self.provided_tags.insert(tag.clone(), hash.clone()); } - let transaction = TransactionRef { - insertion_id, - transaction - }; + let transaction = TransactionRef { insertion_id, transaction }; // insert to best if it doesn't require any other transaction to be included before it if goes_to_best { @@ -221,21 +218,17 @@ impl ReadyTransactions { } // insert to Ready - ready.insert(hash, ReadyTx { - transaction, - unlocks, - requires_offset, - }); + ready.insert(hash, ReadyTx { transaction, unlocks, requires_offset }); Ok(replaced) } /// Fold a list of ready transactions to compute a single value. 
- pub fn fold, &ReadyTx) -> Option>(&mut self, f: F) -> Option { - self.ready - .read() - .values() - .fold(None, f) + pub fn fold, &ReadyTx) -> Option>( + &mut self, + f: F, + ) -> Option { + self.ready.read().values().fold(None, f) } /// Returns true if given transaction is part of the queue. @@ -251,9 +244,10 @@ impl ReadyTransactions { /// Retrieve transactions by hash pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { let ready = self.ready.read(); - hashes.iter().map(|hash| { - ready.get(hash).map(|x| x.transaction.transaction.clone()) - }).collect() + hashes + .iter() + .map(|hash| ready.get(hash).map(|x| x.transaction.transaction.clone())) + .collect() } /// Removes a subtree of transactions from the ready pool. @@ -280,13 +274,12 @@ impl ReadyTransactions { let mut ready = self.ready.write(); while let Some(hash) = to_remove.pop() { if let Some(mut tx) = ready.remove(&hash) { - let invalidated = tx.transaction.transaction.provides - .iter() - .filter(|tag| provides_tag_filter + let invalidated = tx.transaction.transaction.provides.iter().filter(|tag| { + provides_tag_filter .as_ref() .map(|filter| !filter.contains(&**tag)) .unwrap_or(true) - ); + }); let mut removed_some_tags = false; // remove entries from provided_tags @@ -331,7 +324,9 @@ impl ReadyTransactions { let mut to_remove = vec![tag]; while let Some(tag) = to_remove.pop() { - let res = self.provided_tags.remove(&tag) + let res = self + .provided_tags + .remove(&tag) .and_then(|hash| self.ready.write().remove(&hash)); if let Some(tx) = res { @@ -417,19 +412,18 @@ impl ReadyTransactions { fn replace_previous( &mut self, tx: &Transaction, - ) -> error::Result< - (Vec>>, Vec) - > { + ) -> error::Result<(Vec>>, Vec)> { let (to_remove, unlocks) = { // check if we are replacing a transaction - let replace_hashes = tx.provides + let replace_hashes = tx + .provides .iter() .filter_map(|tag| self.provided_tags.get(tag)) .collect::>(); // early exit if we are not replacing anything. if replace_hashes.is_empty() { - return Ok((vec![], vec![])); + return Ok((vec![], vec![])) } // now check if collective priority is lower than the replacement transaction. @@ -438,9 +432,9 @@ impl ReadyTransactions { replace_hashes .iter() .filter_map(|hash| ready.get(hash)) - .fold(0u64, |total, tx| + .fold(0u64, |total, tx| { total.saturating_add(tx.transaction.transaction.priority) - ) + }) }; // bail - the transaction has too low priority to replace the old ones @@ -451,28 +445,22 @@ impl ReadyTransactions { // construct a list of unlocked transactions let unlocks = { let ready = self.ready.read(); - replace_hashes - .iter() - .filter_map(|hash| ready.get(hash)) - .fold(vec![], |mut list, tx| { + replace_hashes.iter().filter_map(|hash| ready.get(hash)).fold( + vec![], + |mut list, tx| { list.extend(tx.unlocks.iter().cloned()); list - }) + }, + ) }; - ( - replace_hashes.into_iter().cloned().collect::>(), - unlocks - ) + (replace_hashes.into_iter().cloned().collect::>(), unlocks) }; let new_provides = tx.provides.iter().cloned().collect::>(); let removed = self.remove_subtree_with_tag_filter(to_remove, Some(new_provides)); - Ok(( - removed, - unlocks - )) + Ok((removed, unlocks)) } /// Returns number of transactions in this queue. 
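// The `Ord` for `TransactionRef` earlier in this file encodes the ready-queue
// ordering: higher priority first, then the transaction that expires sooner, then
// the one inserted earlier. A self-contained comparator with the same shape, using
// a (priority, valid_till, insertion_id) tuple as a toy stand-in; the asserts mirror
// the ordering tests above:
use std::cmp::Ordering;

type Ref = (u64, u64, u64); // (priority, valid_till, insertion_id)

fn cmp_refs(a: &Ref, b: &Ref) -> Ordering {
    a.0.cmp(&b.0) // higher priority = better
        .then_with(|| b.1.cmp(&a.1)) // lower valid_till = better
        .then_with(|| b.2.cmp(&a.2)) // lower insertion_id = better
}

fn main() {
    assert_eq!(cmp_refs(&(3, 3, 1), &(2, 3, 2)), Ordering::Greater);
    assert_eq!(cmp_refs(&(3, 2, 1), &(3, 3, 2)), Ordering::Greater);
    assert_eq!(cmp_refs(&(3, 3, 1), &(3, 3, 2)), Ordering::Greater);
}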
@@ -500,7 +488,6 @@ impl BestIterator { if satisfied >= tx_ref.transaction.requires.len() { // If we have satisfied all deps insert to best self.best.insert(tx_ref); - } else { // otherwise we're still awaiting for some deps self.awaiting.insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); @@ -531,7 +518,10 @@ impl Iterator for BestIterator { Some((satisfied, tx_ref)) // then get from the pool } else { - self.all.read().get(hash).map(|next| (next.requires_offset + 1, next.transaction.clone())) + self.all + .read() + .get(hash) + .map(|next| (next.requires_offset + 1, next.transaction.clone())) }; if let Some((satisfied, tx_ref)) = res { self.best_or_awaiting(satisfied, tx_ref) @@ -571,7 +561,7 @@ mod tests { fn import( ready: &mut ReadyTransactions, - tx: Transaction + tx: Transaction, ) -> error::Result>>> { let x = WaitingTransaction::new(tx, ready.provided_tags(), &[]); ready.import(x) @@ -662,7 +652,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::MAX, // use the max here for testing. + valid_till: u64::MAX, // use the max here for testing. requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, @@ -695,7 +685,7 @@ mod tests { bytes: 1, hash: 5, priority: 1, - valid_till: u64::MAX, // use the max here for testing. + valid_till: u64::MAX, // use the max here for testing. requires: vec![], provides: vec![], propagate: true, @@ -717,28 +707,19 @@ mod tests { tx }; // higher priority = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(2, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(2, 3)), insertion_id: 2 } + ); // lower validity = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 2)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 2)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + ); // lower insertion_id = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); + assert!( + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 1 } > + TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } + ); } } diff --git a/substrate/client/transaction-pool/src/graph/rotator.rs b/substrate/client/transaction-pool/src/graph/rotator.rs index 0e4fd0abf2974e6e55683acd4dedc335dca19017..820fde35dac18780b6d8ef501238082d1e885c11 100644 --- a/substrate/client/transaction-pool/src/graph/rotator.rs +++ b/substrate/client/transaction-pool/src/graph/rotator.rs @@ -21,13 +21,8 @@ //! Keeps only recent extrinsic and discard the ones kept for a significant amount of time. //! Discarded extrinsics are banned so that they don't get re-imported again. 
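// The rotator contract stated in the module docs above: an extrinsic is stale once
// the current block passed its `valid_till`, and stale hashes stay banned for
// `ban_time` so re-imports are rejected cheaply. A standalone sketch with toy `u64`
// hashes (the crate itself stores ban start times and garbage-collects the map):
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Rotator {
    ban_time: Duration,
    banned_until: HashMap<u64, Instant>,
}

impl Rotator {
    fn ban_if_stale(&mut self, now: Instant, current_block: u64, valid_till: u64, hash: u64) -> bool {
        if valid_till > current_block {
            return false // still valid, nothing to do
        }
        // Stale: remember the hash until the ban expires.
        self.banned_until.insert(hash, now + self.ban_time);
        true
    }

    fn is_banned(&self, now: Instant, hash: u64) -> bool {
        self.banned_until.get(&hash).map_or(false, |until| *until > now)
    }
}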
-use std::{ - collections::HashMap, - hash, - iter, - time::Duration, -}; use parking_lot::RwLock; +use std::{collections::HashMap, hash, iter, time::Duration}; use wasm_timer::Instant; use super::base_pool::Transaction; @@ -48,10 +43,7 @@ pub struct PoolRotator { impl Default for PoolRotator { fn default() -> Self { - Self { - ban_time: Duration::from_secs(60 * 30), - banned_until: Default::default(), - } + Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default() } } } @@ -62,7 +54,7 @@ impl PoolRotator { } /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { let mut banned = self.banned_until.write(); for hash in hashes { @@ -81,9 +73,14 @@ impl PoolRotator { /// Bans extrinsic if it's stale. /// /// Returns `true` if extrinsic is stale and got banned. - pub fn ban_if_stale(&self, now: &Instant, current_block: u64, xt: &Transaction) -> bool { + pub fn ban_if_stale( + &self, + now: &Instant, + current_block: u64, + xt: &Transaction, + ) -> bool { if xt.valid_till > current_block { - return false; + return false } self.ban(now, iter::once(xt.hash.clone())); @@ -107,10 +104,7 @@ mod tests { type Ex = (); fn rotator() -> PoolRotator { - PoolRotator { - ban_time: Duration::from_millis(10), - ..Default::default() - } + PoolRotator { ban_time: Duration::from_millis(10), ..Default::default() } } fn tx() -> (Hash, Transaction) { @@ -160,7 +154,6 @@ mod tests { assert!(rotator.is_banned(&hash)); } - #[test] fn should_clear_banned() { // given @@ -201,14 +194,14 @@ mod tests { let past_block = 0; // when - for i in 0..2*EXPECTED_SIZE { + for i in 0..2 * EXPECTED_SIZE { let tx = tx_with(i as u64, past_block); assert!(rotator.ban_if_stale(&now, past_block, &tx)); } - assert_eq!(rotator.banned_until.read().len(), 2*EXPECTED_SIZE); + assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); // then - let tx = tx_with(2*EXPECTED_SIZE as u64, past_block); + let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); // trigger a garbage collection assert!(rotator.ban_if_stale(&now, past_block, &tx)); assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); diff --git a/substrate/client/transaction-pool/src/graph/tracked_map.rs b/substrate/client/transaction-pool/src/graph/tracked_map.rs index 98fd9e21b3160443691314c0eabe2b77d24898df..c1fdda227c6ae189cf73c6fca92559fb02691409 100644 --- a/substrate/client/transaction-pool/src/graph/tracked_map.rs +++ b/substrate/client/transaction-pool/src/graph/tracked_map.rs @@ -16,11 +16,14 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard}; use std::{ collections::HashMap, - sync::{Arc, atomic::{AtomicIsize, Ordering as AtomicOrdering}}, + sync::{ + atomic::{AtomicIsize, Ordering as AtomicOrdering}, + Arc, + }, }; -use parking_lot::{RwLock, RwLockWriteGuard, RwLockReadGuard}; /// Something that can report its size. pub trait Size { @@ -39,11 +42,7 @@ pub struct TrackedMap { impl Default for TrackedMap { fn default() -> Self { - Self { - index: Arc::new(HashMap::default().into()), - bytes: 0.into(), - length: 0.into(), - } + Self { index: Arc::new(HashMap::default().into()), bytes: 0.into(), length: 0.into() } } } @@ -65,9 +64,7 @@ impl TrackedMap { /// Lock map for read. 
pub fn read(&self) -> TrackedMapReadAccess { - TrackedMapReadAccess { - inner_guard: self.index.read(), - } + TrackedMapReadAccess { inner_guard: self.index.read() } } /// Lock map for write. @@ -87,13 +84,11 @@ pub struct ReadOnlyTrackedMap(Arc>>); impl ReadOnlyTrackedMap where - K: Eq + std::hash::Hash + K: Eq + std::hash::Hash, { /// Lock map for read. pub fn read(&self) -> TrackedMapReadAccess { - TrackedMapReadAccess { - inner_guard: self.0.read(), - } + TrackedMapReadAccess { inner_guard: self.0.read() } } } @@ -103,7 +98,7 @@ pub struct TrackedMapReadAccess<'a, K, V> { impl<'a, K, V> TrackedMapReadAccess<'a, K, V> where - K: Eq + std::hash::Hash + K: Eq + std::hash::Hash, { /// Returns true if map contains key. pub fn contains_key(&self, key: &K) -> bool { @@ -129,7 +124,8 @@ pub struct TrackedMapWriteAccess<'a, K, V> { impl<'a, K, V> TrackedMapWriteAccess<'a, K, V> where - K: Eq + std::hash::Hash, V: Size + K: Eq + std::hash::Hash, + V: Size, { /// Insert value and return previous (if any). pub fn insert(&mut self, key: K, val: V) -> Option { @@ -165,7 +161,9 @@ mod tests { use super::*; impl Size for i32 { - fn size(&self) -> usize { *self as usize / 10 } + fn size(&self) -> usize { + *self as usize / 10 + } } #[test] diff --git a/substrate/client/transaction-pool/src/graph/validated_pool.rs b/substrate/client/transaction-pool/src/graph/validated_pool.rs index 5feba94dc56b12c1acc8b5c9b5e5b2d439a5ca5b..3ac7f002077cbcfd38810334d22a9c04a5cfc3e8 100644 --- a/substrate/client/transaction-pool/src/graph/validated_pool.rs +++ b/substrate/client/transaction-pool/src/graph/validated_pool.rs @@ -17,27 +17,31 @@ // along with this program. If not, see . use std::{ - collections::{HashSet, HashMap}, + collections::{HashMap, HashSet}, hash, sync::Arc, }; -use serde::Serialize; +use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; +use retain_mut::RetainMut; +use sc_transaction_pool_api::{error, PoolStatus}; +use serde::Serialize; use sp_runtime::{ generic::BlockId, traits::{self, SaturatedConversion}, - transaction_validity::{TransactionTag as Tag, ValidTransaction, TransactionSource}, + transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, }; -use sc_transaction_pool_api::{error, PoolStatus}; use wasm_timer::Instant; -use futures::channel::mpsc::{channel, Sender}; -use retain_mut::RetainMut; use super::{ - base_pool::{self as base, PruneStatus}, watcher::Watcher, - listener::Listener, rotator::PoolRotator, - pool::{EventStream, Options, ChainApi, BlockHash, ExtrinsicHash, ExtrinsicFor, TransactionFor}, + base_pool::{self as base, PruneStatus}, + listener::Listener, + pool::{ + BlockHash, ChainApi, EventStream, ExtrinsicFor, ExtrinsicHash, Options, TransactionFor, + }, + rotator::PoolRotator, + watcher::Watcher, }; /// Pre-validated transaction. Validated pool only accepts transactions wrapped in this enum. @@ -72,19 +76,14 @@ impl ValidatedTransaction { requires: validity.requires, provides: validity.provides, propagate: validity.propagate, - valid_till: at - .saturated_into::() - .saturating_add(validity.longevity), + valid_till: at.saturated_into::().saturating_add(validity.longevity), }) } } /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExtrinsicHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; /// A closure that returns true if the local node is a validator that can author blocks. 
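// `IsValidator` wraps a boxed `Fn() -> bool + Send + Sync` closure, and the
// `true.into()` calls in the pool tests earlier in this diff rely on a `From<bool>`
// impl along these lines — an inference from the call sites, not a verbatim copy:
struct IsValidator(Box<dyn Fn() -> bool + Send + Sync>);

impl From<bool> for IsValidator {
    // Lift a constant answer into the closure form the pool expects.
    fn from(is_validator: bool) -> Self {
        Self(Box::new(move || is_validator))
    }
}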
pub struct IsValidator(Box bool + Send + Sync>); @@ -107,10 +106,7 @@ pub struct ValidatedPool { is_validator: IsValidator, options: Options, listener: RwLock, B>>, - pool: RwLock, - ExtrinsicFor, - >>, + pool: RwLock, ExtrinsicFor>>, import_notification_sinks: Mutex>>>, rotator: PoolRotator>, } @@ -142,7 +138,7 @@ impl ValidatedPool { } /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { self.rotator.ban(now, hashes) } @@ -173,9 +169,10 @@ impl ValidatedPool { /// Imports a bunch of pre-validated transactions to the pool. pub fn submit( &self, - txs: impl IntoIterator>, + txs: impl IntoIterator>, ) -> Vec, B::Error>> { - let results = txs.into_iter() + let results = txs + .into_iter() .map(|validated_tx| self.submit_one(validated_tx)) .collect::>(); @@ -186,10 +183,14 @@ impl ValidatedPool { Default::default() }; - results.into_iter().map(|res| match res { - Ok(ref hash) if removed.contains(hash) => Err(error::Error::ImmediatelyDropped.into()), - other => other, - }).collect() + results + .into_iter() + .map(|res| match res { + Ok(ref hash) if removed.contains(hash) => + Err(error::Error::ImmediatelyDropped.into()), + other => other, + }) + .collect() } /// Submit single pre-validated transaction to the pool. @@ -197,30 +198,28 @@ impl ValidatedPool { match tx { ValidatedTransaction::Valid(tx) => { if !tx.propagate && !(self.is_validator.0)() { - return Err(error::Error::Unactionable.into()); + return Err(error::Error::Unactionable.into()) } let imported = self.pool.write().import(tx)?; if let base::Imported::Ready { ref hash, .. } = imported { - self.import_notification_sinks.lock() - .retain_mut(|sink| { - match sink.try_send(*hash) { - Ok(()) => true, - Err(e) => { - if e.is_full() { - log::warn!( - target: "txpool", - "[{:?}] Trying to notify an import but the channel is full", - hash, - ); - true - } else { - false - } + self.import_notification_sinks.lock().retain_mut(|sink| { + match sink.try_send(*hash) { + Ok(()) => true, + Err(e) => + if e.is_full() { + log::warn!( + target: "txpool", + "[{:?}] Trying to notify an import but the channel is full", + hash, + ); + true + } else { + false }, - } - }); + } + }); } let mut listener = self.listener.write(); @@ -244,8 +243,8 @@ impl ValidatedPool { let future_limit = &self.options.future; log::debug!(target: "txpool", "Pool Status: {:?}", status); - if ready_limit.is_exceeded(status.ready, status.ready_bytes) - || future_limit.is_exceeded(status.future, status.future_bytes) + if ready_limit.is_exceeded(status.ready, status.ready_bytes) || + future_limit.is_exceeded(status.future, status.future_bytes) { log::debug!( target: "txpool", @@ -257,8 +256,11 @@ impl ValidatedPool { // clean up the pool let removed = { let mut pool = self.pool.write(); - let removed = pool.enforce_limits(ready_limit, future_limit) - .into_iter().map(|x| x.hash).collect::>(); + let removed = pool + .enforce_limits(ready_limit, future_limit) + .into_iter() + .map(|x| x.hash) + .collect::>(); // ban all removed transactions self.rotator.ban(&Instant::now(), removed.iter().copied()); removed @@ -305,9 +307,17 @@ impl ValidatedPool { /// /// Removes and then submits passed transactions and all dependent transactions. /// Transactions that are missing from the pool are not submitted. 
- pub fn resubmit(&self, mut updated_transactions: HashMap, ValidatedTransactionFor>) { + pub fn resubmit( + &self, + mut updated_transactions: HashMap, ValidatedTransactionFor>, + ) { #[derive(Debug, Clone, Copy, PartialEq)] - enum Status { Future, Ready, Failed, Dropped } + enum Status { + Future, + Ready, + Failed, + Dropped, + } let (mut initial_statuses, final_statuses) = { let mut pool = self.pool.write(); @@ -322,7 +332,11 @@ impl ValidatedPool { let mut initial_statuses = HashMap::new(); let mut txs_to_resubmit = Vec::with_capacity(updated_transactions.len()); while !updated_transactions.is_empty() { - let hash = updated_transactions.keys().next().cloned().expect("transactions is not empty; qed"); + let hash = updated_transactions + .keys() + .next() + .cloned() + .expect("transactions is not empty; qed"); // note we are not considering tx with hash invalid here - we just want // to remove it along with dependent transactions and `remove_subtree()` @@ -390,7 +404,8 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); }, }, - ValidatedTransaction::Invalid(_, _) | ValidatedTransaction::Unknown(_, _) => { + ValidatedTransaction::Invalid(_, _) | + ValidatedTransaction::Unknown(_, _) => { final_statuses.insert(hash, Status::Failed); }, } @@ -425,12 +440,13 @@ impl ValidatedPool { /// For each extrinsic, returns tags that it provides (if known), or None (if it is unknown). pub fn extrinsics_tags(&self, hashes: &[ExtrinsicHash]) -> Vec>> { - self.pool.read() + self.pool + .read() .by_hashes(&hashes) .into_iter() - .map(|existing_in_pool| + .map(|existing_in_pool| { existing_in_pool.map(|transaction| transaction.provides.to_vec()) - ) + }) .collect() } @@ -442,7 +458,7 @@ impl ValidatedPool { /// Prunes ready transactions that provide given list of tags. pub fn prune_tags( &self, - tags: impl IntoIterator, + tags: impl IntoIterator, ) -> Result, ExtrinsicFor>, B::Error> { // Perform tag-based pruning in the base pool let status = self.pool.write().prune_tags(tags); @@ -465,7 +481,7 @@ impl ValidatedPool { pub fn resubmit_pruned( &self, at: &BlockId, - known_imported_hashes: impl IntoIterator> + Clone, + known_imported_hashes: impl IntoIterator> + Clone, pruned_hashes: Vec>, pruned_xts: Vec>, ) -> Result<(), B::Error> { @@ -475,13 +491,12 @@ impl ValidatedPool { let results = self.submit(pruned_xts); // Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned). - let hashes = results - .into_iter() - .enumerate() - .filter_map(|(idx, r)| match r.map_err(error::IntoPoolError::into_pool_error) { + let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| { + match r.map_err(error::IntoPoolError::into_pool_error) { Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx]), _ => None, - }); + } + }); // Fire `pruned` notifications for collected hashes and make sure to include // `known_imported_hashes` since they were just imported as part of the block. let hashes = hashes.chain(known_imported_hashes.into_iter()); @@ -497,9 +512,11 @@ impl ValidatedPool { pub fn fire_pruned( &self, at: &BlockId, - hashes: impl Iterator>, + hashes: impl Iterator>, ) -> Result<(), B::Error> { - let header_hash = self.api.block_id_to_hash(at)? + let header_hash = self + .api + .block_id_to_hash(at)? 
.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))?; let mut listener = self.listener.write(); let mut set = HashSet::with_capacity(hashes.size_hint().0); @@ -520,7 +537,9 @@ impl ValidatedPool { /// Note this function does not remove transactions that are already included in the chain. /// See `prune_tags` if you want this. pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { - let block_number = self.api.block_id_to_number(at)? + let block_number = self + .api + .block_id_to_number(at)? .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))? .saturated_into::(); let now = Instant::now(); @@ -589,7 +608,7 @@ impl ValidatedPool { pub fn remove_invalid(&self, hashes: &[ExtrinsicHash]) -> Vec> { // early exit in case there is no invalid transactions. if hashes.is_empty() { - return vec![]; + return vec![] } log::debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); @@ -610,13 +629,15 @@ impl ValidatedPool { } /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> + Send { + pub fn ready(&self) -> impl Iterator> + Send { self.pool.read().ready() } /// Returns a Vec of hashes and extrinsics in the future pool. pub fn futures(&self) -> Vec<(ExtrinsicHash, ExtrinsicFor)> { - self.pool.read().futures() + self.pool + .read() + .futures() .map(|tx| (tx.hash.clone(), tx.data.clone())) .collect() } @@ -639,10 +660,8 @@ impl ValidatedPool { } } -fn fire_events( - listener: &mut Listener, - imported: &base::Imported, -) where +fn fire_events(listener: &mut Listener, imported: &base::Imported) +where H: hash::Hash + Eq + traits::Member + Serialize, B: ChainApi, { @@ -653,8 +672,6 @@ fn fire_events( removed.into_iter().for_each(|r| listener.dropped(&r.hash, Some(hash))); promoted.into_iter().for_each(|p| listener.ready(p, None)); }, - base::Imported::Future { ref hash } => { - listener.future(hash) - }, + base::Imported::Future { ref hash } => listener.future(hash), } } diff --git a/substrate/client/transaction-pool/src/graph/watcher.rs b/substrate/client/transaction-pool/src/graph/watcher.rs index 64e6032f0c2d5095f561f9fb000ef6b6ba375e50..91777117efe94d5ea0e40f64bf3d82dce269c051 100644 --- a/substrate/client/transaction-pool/src/graph/watcher.rs +++ b/substrate/client/transaction-pool/src/graph/watcher.rs @@ -20,7 +20,7 @@ use futures::Stream; use sc_transaction_pool_api::TransactionStatus; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; /// Extrinsic watcher. /// @@ -41,7 +41,7 @@ impl Watcher { /// Pipe the notifications to given sink. /// /// Make sure to drive the future to completion. - pub fn into_stream(self) -> impl Stream> { + pub fn into_stream(self) -> impl Stream> { self.receiver } } @@ -55,10 +55,7 @@ pub struct Sender { impl Default for Sender { fn default() -> Self { - Sender { - receivers: Default::default(), - is_finalized: false, - } + Sender { receivers: Default::default(), is_finalized: false } } } @@ -67,10 +64,7 @@ impl Sender { pub fn new_watcher(&mut self, hash: H) -> Watcher { let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); self.receivers.push(tx); - Watcher { - receiver, - hash, - } + Watcher { receiver, hash } } /// Transaction became ready. 
diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 8f89063657c045077b662ea823186a3d61cdd95f..302c7a1b59b65afeea807f190b47acae04ddf22b 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -18,14 +18,14 @@ //! Substrate transaction pool implementation. -#![recursion_limit="256"] +#![recursion_limit = "256"] #![warn(missing_docs)] #![warn(unused_extern_crates)] mod api; mod graph; -mod revalidation; mod metrics; +mod revalidation; pub mod error; @@ -33,53 +33,60 @@ pub mod error; #[cfg(feature = "test-helpers")] pub mod test_helpers { pub use super::{ - graph::{ChainApi, Pool, NumberFor, BlockHash, ExtrinsicFor}, + graph::{BlockHash, ChainApi, ExtrinsicFor, NumberFor, Pool}, revalidation::RevalidationQueue, }; } -pub use graph::{Options, Transaction}; pub use crate::api::{FullChainApi, LightChainApi}; -use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin, convert::TryInto}; -use futures::{prelude::*, future::{self, ready}, channel::oneshot}; +use futures::{ + channel::oneshot, + future::{self, ready}, + prelude::*, +}; +pub use graph::{Options, Transaction}; use parking_lot::Mutex; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, + pin::Pin, + sync::Arc, +}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero, Header as HeaderT}, +use graph::{ExtrinsicHash, IsValidator}; +use sc_transaction_pool_api::{ + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, + TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_core::traits::SpawnEssentialNamed; -use sc_transaction_pool_api::{ - TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, - TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, - TransactionSource, +use sp_runtime::{ + generic::BlockId, + traits::{AtLeast32Bit, Block as BlockT, Extrinsic, Header as HeaderT, NumberFor, Zero}, }; -use graph::{IsValidator, ExtrinsicHash}; use wasm_timer::Instant; -use prometheus_endpoint::Registry as PrometheusRegistry; use crate::metrics::MetricsLink as PrometheusMetrics; +use prometheus_endpoint::Registry as PrometheusRegistry; -type BoxedReadyIterator = Box< - dyn Iterator>> + Send ->; +type BoxedReadyIterator = + Box>> + Send>; -type ReadyIteratorFor = BoxedReadyIterator< - graph::ExtrinsicHash, graph::ExtrinsicFor ->; +type ReadyIteratorFor = + BoxedReadyIterator, graph::ExtrinsicFor>; -type PolledIterator = Pin> + Send>>; +type PolledIterator = Pin> + Send>>; /// A transaction pool for a full node. pub type FullPool = BasicPool, Block>; /// A transaction pool for a light node. -pub type LightPool = BasicPool, Block>; +pub type LightPool = + BasicPool, Block>; /// Basic implementation of transaction pool that can be customized by providing PoolApi. 
pub struct BasicPool - where - Block: BlockT, - PoolApi: graph::ChainApi, +where + Block: BlockT, + PoolApi: graph::ChainApi, { pool: Arc>, api: Arc, @@ -96,19 +103,13 @@ struct ReadyPoll { impl Default for ReadyPoll { fn default() -> Self { - Self { - updated_at: NumberFor::::zero(), - pollers: Default::default(), - } + Self { updated_at: NumberFor::::zero(), pollers: Default::default() } } } impl ReadyPoll { fn new(best_block_number: NumberFor) -> Self { - Self { - updated_at: best_block_number, - pollers: Default::default(), - } + Self { updated_at: best_block_number, pollers: Default::default() } } fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { @@ -140,7 +141,7 @@ impl ReadyPoll { #[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for BasicPool where - PoolApi: graph::ChainApi, + PoolApi: graph::ChainApi, Block: BlockT, { fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { @@ -167,15 +168,15 @@ pub enum RevalidationType { } impl BasicPool - where - Block: BlockT, - PoolApi: graph::ChainApi + 'static, +where + Block: BlockT, + PoolApi: graph::ChainApi + 'static, { /// Create new basic transaction pool with provided api, for tests. #[cfg(feature = "test-helpers")] pub fn new_test( pool_api: Arc, - ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { + ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { let pool = Arc::new(graph::Pool::new(Default::default(), true.into(), pool_api.clone())); let (revalidation_queue, background_task, notifier) = revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); @@ -206,15 +207,11 @@ impl BasicPool ) -> Self { let pool = Arc::new(graph::Pool::new(options, is_validator, pool_api.clone())); let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => ( - revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), - None, - ), + RevalidationType::Light => + (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), RevalidationType::Full => { - let (queue, background) = revalidation::RevalidationQueue::new_background( - pool_api.clone(), - pool.clone(), - ); + let (queue, background) = + revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); (queue, Some(background)) }, }; @@ -227,12 +224,11 @@ impl BasicPool api: pool_api, pool, revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new( - match revalidation_type { - RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - } - )), + revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { + RevalidationType::Light => + RevalidationStrategy::Light(RevalidationStatus::NotScheduled), + RevalidationType::Full => RevalidationStrategy::Always, + })), ready_poll: Arc::new(Mutex::new(ReadyPoll::new(best_block_number))), metrics: PrometheusMetrics::new(prometheus), } @@ -251,15 +247,13 @@ impl BasicPool } impl TransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, { type Block = PoolApi::Block; type Hash = graph::ExtrinsicHash; - type InPoolTransaction = graph::base_pool::Transaction< - TxHash, TransactionFor - >; + type InPoolTransaction = graph::base_pool::Transaction, TransactionFor>; type Error = PoolApi::Error; fn submit_at( @@ -271,7 +265,8 @@ impl TransactionPool for 
BasicPool let pool = self.pool.clone(); let at = *at; - self.metrics.report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); + self.metrics + .report(|metrics| metrics.submitted_transactions.inc_by(xts.len() as u64)); async move { pool.submit_at(&at, source, xts).await }.boxed() } @@ -305,12 +300,14 @@ impl TransactionPool for BasicPool pool.submit_and_watch(&at, source, xt) .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) .await - }.boxed() + } + .boxed() } fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { let removed = self.pool.validated_pool().remove_invalid(hashes); - self.metrics.report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); + self.metrics + .report(|metrics| metrics.validations_invalid.inc_by(removed.len() as u64)); removed } @@ -347,16 +344,18 @@ impl TransactionPool for BasicPool if self.ready_poll.lock().updated_at() >= at { log::trace!(target: "txpool", "Transaction pool already processed block #{}", at); let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return async move { iterator }.boxed(); + return async move { iterator }.boxed() } self.ready_poll .lock() .add(at) - .map(|received| received.unwrap_or_else(|e| { - log::warn!("Error receiving pending set: {:?}", e); - Box::new(std::iter::empty()) - })) + .map(|received| { + received.unwrap_or_else(|e| { + log::warn!("Error receiving pending set: {:?}", e); + Box::new(std::iter::empty()) + }) + }) .boxed() } @@ -452,9 +451,10 @@ where at: &BlockId, xt: sc_transaction_pool_api::LocalTransactionFor, ) -> Result { - use graph::{ValidatedTransaction, ChainApi}; - use sp_runtime::traits::SaturatedConversion; - use sp_runtime::transaction_validity::TransactionValidityError; + use graph::{ChainApi, ValidatedTransaction}; + use sp_runtime::{ + traits::SaturatedConversion, transaction_validity::TransactionValidityError, + }; let validity = self .api @@ -527,10 +527,7 @@ impl RevalidationStrategy { ), resubmit: false, }, - Self::Always => RevalidationAction { - revalidate: true, - resubmit: true, - } + Self::Always => RevalidationAction { revalidate: true, resubmit: true }, } } } @@ -555,15 +552,16 @@ impl RevalidationStatus { revalidate_block_period.map(|period| block + period), ); false - } + }, Self::Scheduled(revalidate_at_time, revalidate_at_block) => { - let is_required = revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) - || revalidate_at_block.map(|at| block >= at).unwrap_or(false); + let is_required = + revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) || + revalidate_at_block.map(|at| block >= at).unwrap_or(false); if is_required { *self = Self::InProgress; } is_required - } + }, Self::InProgress => false, } } @@ -575,16 +573,16 @@ async fn prune_known_txs_for_block, ) -> Vec> { - let extrinsics = api.block_body(&block_id).await + let extrinsics = api + .block_body(&block_id) + .await .unwrap_or_else(|e| { log::warn!("Prune known transactions: error request {:?}!", e); None }) .unwrap_or_default(); - let hashes = extrinsics.iter() - .map(|tx| pool.hash_of(&tx)) - .collect::>(); + let hashes = extrinsics.iter().map(|tx| pool.hash_of(&tx)).collect::>(); log::trace!(target: "txpool", "Pruning transactions: {:?}", hashes); @@ -597,10 +595,11 @@ async fn prune_known_txs_for_block { log::debug!(target: "txpool", "Error retrieving header for {:?}: {:?}", block_id, e); return hashes - } + }, }; - if let Err(e) = pool.prune(&block_id, &BlockId::hash(*header.parent_hash()), &extrinsics).await { + 
if let Err(e) = pool.prune(&block_id, &BlockId::hash(*header.parent_hash()), &extrinsics).await + { log::error!("Cannot prune known in the pool {:?}!", e); } @@ -608,11 +607,11 @@ async fn prune_known_txs_for_block MaintainedTransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + graph::ChainApi, +where + Block: BlockT, + PoolApi: 'static + graph::ChainApi, { - fn maintain(&self, event: ChainEvent) -> Pin + Send>> { + fn maintain(&self, event: ChainEvent) -> Pin + Send>> { match event { ChainEvent::NewBestBlock { hash, tree_route } => { let pool = self.pool.clone(); @@ -627,8 +626,8 @@ impl MaintainedTransactionPool for BasicPool "Skipping chain event - no number for that block {:?}", id, ); - return Box::pin(ready(())); - } + return Box::pin(ready(())) + }, }; let next_action = self.revalidation_strategy.lock().next( @@ -657,27 +656,21 @@ impl MaintainedTransactionPool for BasicPool pool.validated_pool().on_block_retracted(retracted.hash.clone()); } - future::join_all( - tree_route - .enacted() - .iter() - .map(|h| - prune_known_txs_for_block( - BlockId::Hash(h.hash.clone()), - &*api, - &*pool, - ), - ), - ).await.into_iter().for_each(|enacted_log|{ + future::join_all(tree_route.enacted().iter().map(|h| { + prune_known_txs_for_block(BlockId::Hash(h.hash.clone()), &*api, &*pool) + })) + .await + .into_iter() + .for_each(|enacted_log| { pruned_log.extend(enacted_log); }) } pruned_log.extend(prune_known_txs_for_block(id.clone(), &*api, &*pool).await); - metrics.report( - |metrics| metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) - ); + metrics.report(|metrics| { + metrics.block_transactions_pruned.inc_by(pruned_log.len() as u64) + }); if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) { let mut resubmit_transactions = Vec::new(); @@ -685,7 +678,8 @@ impl MaintainedTransactionPool for BasicPool for retracted in tree_route.retracted() { let hash = retracted.hash.clone(); - let block_transactions = api.block_body(&BlockId::hash(hash)) + let block_transactions = api + .block_body(&BlockId::hash(hash)) .await .unwrap_or_else(|e| { log::warn!("Failed to fetch block body {:?}!", e); @@ -697,8 +691,8 @@ impl MaintainedTransactionPool for BasicPool let mut resubmitted_to_report = 0; - resubmit_transactions.extend( - block_transactions.into_iter().filter(|tx| { + resubmit_transactions.extend(block_transactions.into_iter().filter( + |tx| { let tx_hash = pool.hash_of(&tx); let contains = pruned_log.contains(&tx_hash); @@ -714,21 +708,24 @@ impl MaintainedTransactionPool for BasicPool ); } !contains - }) - ); + }, + )); - metrics.report( - |metrics| metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) - ); + metrics.report(|metrics| { + metrics.block_transactions_resubmitted.inc_by(resubmitted_to_report) + }); } - if let Err(e) = pool.resubmit_at( - &id, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - ).await { + if let Err(e) = pool + .resubmit_at( + &id, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + ) + .await + { log::debug!( target: "txpool", "[{:?}] Error re-submitting transactions: {:?}", @@ -741,22 +738,20 @@ impl MaintainedTransactionPool for BasicPool let extra_pool = pool.clone(); // After #5200 lands, this arguably might be moved to the // handler of "all blocks notification". 
- ready_poll.lock().trigger( - block_number, - move || Box::new(extra_pool.validated_pool().ready()), - ); + ready_poll.lock().trigger(block_number, move || { + Box::new(extra_pool.validated_pool().ready()) + }); if next_action.revalidate { - let hashes = pool.validated_pool() - .ready() - .map(|tx| tx.hash.clone()) - .collect(); + let hashes = + pool.validated_pool().ready().map(|tx| tx.hash.clone()).collect(); revalidation_queue.revalidate_later(block_number, hashes).await; revalidation_strategy.lock().clear(); } - }.boxed() - } + } + .boxed() + }, ChainEvent::Finalized { hash } => { let pool = self.pool.clone(); async move { @@ -767,28 +762,25 @@ impl MaintainedTransactionPool for BasicPool e, hash ) } - }.boxed() - } + } + .boxed() + }, } } } /// Inform the transaction pool about imported and finalized blocks. -pub async fn notification_future( - client: Arc, - txpool: Arc -) - where - Block: BlockT, - Client: sc_client_api::BlockchainEvents, - Pool: MaintainedTransactionPool, +pub async fn notification_future(client: Arc, txpool: Arc) +where + Block: BlockT, + Client: sc_client_api::BlockchainEvents, + Pool: MaintainedTransactionPool, { - let import_stream = client.import_notification_stream() + let import_stream = client + .import_notification_stream() .filter_map(|n| ready(n.try_into().ok())) .fuse(); - let finality_stream = client.finality_notification_stream() - .map(Into::into) - .fuse(); + let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); futures::stream::select(import_stream, finality_stream) .for_each(|evt| txpool.maintain(evt)) diff --git a/substrate/client/transaction-pool/src/metrics.rs b/substrate/client/transaction-pool/src/metrics.rs index e0b70183a86b2c9fb594937085390b4528e20095..d62d64f13a0a441559aa0d505d4d281a71ed09db 100644 --- a/substrate/client/transaction-pool/src/metrics.rs +++ b/substrate/client/transaction-pool/src/metrics.rs @@ -27,13 +27,13 @@ pub struct MetricsLink(Arc>); impl MetricsLink { pub fn new(registry: Option<&Registry>) -> Self { - Self(Arc::new( - registry.and_then(|registry| - Metrics::register(registry) - .map_err(|err| { log::warn!("Failed to register prometheus metrics: {}", err); }) - .ok() - ) - )) + Self(Arc::new(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register prometheus metrics: {}", err); + }) + .ok() + }))) } pub fn report(&self, do_this: impl FnOnce(&Metrics)) { diff --git a/substrate/client/transaction-pool/src/revalidation.rs b/substrate/client/transaction-pool/src/revalidation.rs index ffc82bf619cccd6fc5bec76e0282a540f1b69a5a..9f15185694d0a576cca536eca02e05c8f361e3f2 100644 --- a/substrate/client/transaction-pool/src/revalidation.rs +++ b/substrate/client/transaction-pool/src/revalidation.rs @@ -18,13 +18,19 @@ //! Pool periodic revalidation. 
-use std::{sync::Arc, pin::Pin, collections::{HashMap, HashSet, BTreeMap}}; - -use crate::graph::{ChainApi, Pool, ExtrinsicHash, NumberFor, ValidatedTransaction}; -use sp_runtime::traits::{Zero, SaturatedConversion}; -use sp_runtime::generic::BlockId; -use sp_runtime::transaction_validity::TransactionValidityError; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::Arc, +}; + +use crate::graph::{ChainApi, ExtrinsicHash, NumberFor, Pool, ValidatedTransaction}; +use sp_runtime::{ + generic::BlockId, + traits::{SaturatedConversion, Zero}, + transaction_validity::TransactionValidityError, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use futures::prelude::*; use std::time::Duration; @@ -63,19 +69,18 @@ async fn batch_revalidate( pool: Arc>, api: Arc, at: NumberFor, - batch: impl IntoIterator>, + batch: impl IntoIterator>, ) { let mut invalid_hashes = Vec::new(); let mut revalidated = HashMap::new(); - let validation_results = futures::future::join_all( - batch.into_iter().filter_map(|ext_hash| { - pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { - api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) - .map(move |validation_result| (validation_result, ext_hash, ext)) - }) + let validation_results = futures::future::join_all(batch.into_iter().filter_map(|ext_hash| { + pool.validated_pool().ready_by_hash(&ext_hash).map(|ext| { + api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) + .map(move |validation_result| (validation_result, ext_hash, ext)) }) - ).await; + })) + .await; for (validation_result, ext_hash, ext) in validation_results { match validation_result { @@ -98,7 +103,7 @@ async fn batch_revalidate( ext.data.clone(), api.hash_and_length(&ext.data).1, validity, - ) + ), ); }, Err(validation_err) => { @@ -109,7 +114,7 @@ async fn batch_revalidate( validation_err ); invalid_hashes.push(ext_hash); - } + }, } } @@ -120,10 +125,7 @@ async fn batch_revalidate( } impl RevalidationWorker { - fn new( - api: Arc, - pool: Arc>, - ) -> Self { + fn new(api: Arc, pool: Arc>) -> Self { Self { api, pool, @@ -135,7 +137,8 @@ impl RevalidationWorker { fn prepare_batch(&mut self) -> Vec> { let mut queued_exts = Vec::new(); - let mut left = std::cmp::max(MIN_BACKGROUND_REVALIDATION_BATCH_SIZE, self.members.len() / 4); + let mut left = + std::cmp::max(MIN_BACKGROUND_REVALIDATION_BATCH_SIZE, self.members.len() / 4); // Take maximum of count transaction by order // which they got into the pool @@ -188,11 +191,14 @@ impl RevalidationWorker { ext_hash, ); - continue; + continue } - self.block_ordered.entry(block_number) - .and_modify(|value| { value.insert(ext_hash.clone()); }) + self.block_ordered + .entry(block_number) + .and_modify(|value| { + value.insert(ext_hash.clone()); + }) .or_insert_with(|| { let mut bt = HashSet::new(); bt.insert(ext_hash.clone()); @@ -211,7 +217,10 @@ impl RevalidationWorker { mut self, from_queue: TracingUnboundedReceiver>, interval: R, - ) where R: Send, R::Guard: Send { + ) where + R: Send, + R::Guard: Send, + { let interval = interval.into_stream().fuse(); let from_queue = from_queue.fuse(); futures::pin_mut!(interval, from_queue); @@ -269,7 +278,6 @@ impl RevalidationWorker { } } - /// Revalidation queue. /// /// Can be configured background (`new_background`) @@ -286,11 +294,7 @@ where { /// New revalidation queue without background worker. 
pub fn new(api: Arc, pool: Arc>) -> Self { - Self { - api, - pool, - background: None, - } + Self { api, pool, background: None } } /// New revalidation queue with background worker. @@ -298,34 +302,40 @@ where api: Arc, pool: Arc>, interval: R, - ) -> (Self, Pin + Send>>) where R: Send + 'static, R::Guard: Send { + ) -> (Self, Pin + Send>>) + where + R: Send + 'static, + R::Guard: Send, + { let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); let worker = RevalidationWorker::new(api.clone(), pool.clone()); - let queue = - Self { - api, - pool, - background: Some(to_worker), - }; + let queue = Self { api, pool, background: Some(to_worker) }; (queue, worker.run(from_queue, interval).boxed()) } /// New revalidation queue with background worker. - pub fn new_background(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>) - { - Self::new_with_interval(api, pool, intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL)) + pub fn new_background( + api: Arc, + pool: Arc>, + ) -> (Self, Pin + Send>>) { + Self::new_with_interval( + api, + pool, + intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL), + ) } /// New revalidation queue with background worker and test signal. #[cfg(feature = "test-helpers")] - pub fn new_test(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>, intervalier::BackSignalControl) - { - let (interval, notifier) = intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); + pub fn new_test( + api: Arc, + pool: Arc>, + ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { + let (interval, notifier) = + intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); let (queue, background) = Self::new_with_interval(api, pool, interval); (queue, background, notifier) @@ -361,6 +371,4 @@ where } #[cfg(test)] -mod tests { - -} +mod tests {} diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 9a9d59214d0b9a43d965009c1fd5fc63f43947b3..6c34d05cd5dcb6af62832d7190e8d111f71e697b 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -17,37 +17,40 @@ // along with this program. If not, see . //! 
Tests for top-level transaction pool api -use sc_transaction_pool_api::{TransactionStatus, ChainEvent, MaintainedTransactionPool, TransactionPool}; -use futures::executor::{block_on, block_on_stream}; +use codec::Encode; +use futures::{ + executor::{block_on, block_on_stream}, + prelude::*, + task::Poll, +}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::client::BlockchainEvents; +use sc_transaction_pool::{test_helpers::*, *}; +use sc_transaction_pool_api::{ + ChainEvent, MaintainedTransactionPool, TransactionPool, TransactionStatus, +}; +use sp_consensus::BlockOrigin; use sp_runtime::{ - generic::BlockId, traits::Block as _, - transaction_validity::{ValidTransaction, TransactionSource, InvalidTransaction}, + generic::BlockId, + traits::Block as _, + transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; +use std::{collections::BTreeSet, convert::TryInto, sync::Arc}; use substrate_test_runtime_client::{ - runtime::{Block, Hash, Index, Header, Extrinsic, Transfer}, AccountKeyring::*, + runtime::{Block, Extrinsic, Hash, Header, Index, Transfer}, + AccountKeyring::*, ClientBlockImportExt, }; -use substrate_test_runtime_transaction_pool::{TestApi, uxt}; -use futures::{prelude::*, task::Poll}; -use codec::Encode; -use std::{collections::BTreeSet, sync::Arc, convert::TryInto}; -use sc_client_api::client::BlockchainEvents; -use sc_block_builder::BlockBuilderProvider; -use sp_consensus::BlockOrigin; -use sc_transaction_pool::{*, test_helpers::*}; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; fn pool() -> Pool { Pool::new(Default::default(), true.into(), TestApi::with_alice_nonce(209).into()) } -fn maintained_pool() -> ( - BasicPool, - futures::executor::ThreadPool, - intervalier::BackSignalControl, -) { - let (pool, background_task, notifier) = BasicPool::new_test( - Arc::new(TestApi::with_alice_nonce(209)), - ); +fn maintained_pool( +) -> (BasicPool, futures::executor::ThreadPool, intervalier::BackSignalControl) { + let (pool, background_task, notifier) = + BasicPool::new_test(Arc::new(TestApi::with_alice_nonce(209))); let thread_pool = futures::executor::ThreadPool::new().unwrap(); thread_pool.spawn_ok(background_task); @@ -107,13 +110,8 @@ fn prune_tags_should_work() { assert_eq!(pending, vec![209, 210]); pool.validated_pool().api().push_block(1, Vec::new(), true); - block_on( - pool.prune_tags( - &BlockId::number(1), - vec![vec![209]], - vec![hash209], - ) - ).expect("Prune tags"); + block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![hash209])) + .expect("Prune tags"); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![210]); @@ -140,17 +138,13 @@ fn only_prune_on_new_best() { let pool = maintained_pool().0; let uxt = uxt(Alice, 209); - let _ = block_on( - pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(0), SOURCE, uxt.clone())) + .expect("1. 
Imported"); pool.api().push_block(1, vec![uxt.clone()], true); assert_eq!(pool.status().ready, 1); let header = pool.api().push_block(2, vec![uxt], true); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); } @@ -193,10 +187,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { } fn block_event(header: Header) -> ChainEvent { - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None } } fn block_event_with_retracted( @@ -204,12 +195,10 @@ fn block_event_with_retracted( retracted_start: Hash, api: &TestApi, ) -> ChainEvent { - let tree_route = api.tree_route(retracted_start, header.parent_hash).expect("Tree route exists"); + let tree_route = + api.tree_route(retracted_start, header.parent_hash).expect("Tree route exists"); - ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: Some(Arc::new(tree_route)), - } + ChainEvent::NewBestBlock { hash: header.hash(), tree_route: Some(Arc::new(tree_route)) } } #[test] @@ -266,7 +255,6 @@ fn should_resubmit_from_retracted_during_maintenance() { assert_eq!(pool.status().ready, 1); } - #[test] fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacted() { let xt = uxt(Alice, 209); @@ -334,7 +322,6 @@ fn should_revalidate_across_many_blocks() { assert_eq!(pool.api().validation_requests().len(), 7); } - #[test] fn should_push_watchers_during_maintenance() { fn alice_uxt(nonce: u64) -> Extrinsic { @@ -345,25 +332,20 @@ fn should_push_watchers_during_maintenance() { let (pool, _guard, mut notifier) = maintained_pool(); let tx0 = alice_uxt(0); - let watcher0 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone()) - ).unwrap(); + let watcher0 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone())).unwrap(); let tx1 = alice_uxt(1); - let watcher1 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone()) - ).unwrap(); + let watcher1 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone())).unwrap(); let tx2 = alice_uxt(2); - let watcher2 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone()) - ).unwrap(); + let watcher2 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone())).unwrap(); let tx3 = alice_uxt(3); - let watcher3 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone()) - ).unwrap(); + let watcher3 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone())).unwrap(); let tx4 = alice_uxt(4); - let watcher4 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone()) - ).unwrap(); + let watcher4 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone())).unwrap(); assert_eq!(pool.status().ready, 5); // when @@ -405,21 +387,24 @@ fn should_push_watchers_during_maintenance() { vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); assert_eq!( futures::executor::block_on_stream(watcher1).collect::>(), vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); 
assert_eq!( futures::executor::block_on_stream(watcher2).collect::>(), vec![ TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), - TransactionStatus::Finalized(header_hash.clone())], + TransactionStatus::Finalized(header_hash.clone()) + ], ); } @@ -440,16 +425,12 @@ fn finalization() { let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![], true); let (pool, _background, _) = BasicPool::new_test(api.into()); - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. Imported"); + let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. Imported"); pool.api().push_block(2, vec![xt.clone()], true); let header = pool.api().chain().read().block_by_number.get(&2).unwrap()[0].0.header().clone(); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); let event = ChainEvent::Finalized { hash: header.hash() }; @@ -489,17 +470,14 @@ fn fork_aware_finalization() { // block B1 { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) - ).expect("1. Imported"); + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); let header = pool.api().push_block(2, vec![from_alice.clone()], true); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; b1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -510,14 +488,11 @@ fn fork_aware_finalization() { // block C2 { let header = pool.api().push_block_with_parent(b1, vec![from_dave.clone()], true); - from_dave_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()) - ).expect("1. Imported"); + from_dave_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; c2 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -525,16 +500,13 @@ fn fork_aware_finalization() { // block D2 { - from_bob_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()) - ).expect("1. Imported"); + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); let header = pool.api().push_block_with_parent(c2, vec![from_bob.clone()], true); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d2 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -542,9 +514,9 @@ fn fork_aware_finalization() { // block C1 { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()) - ).expect("1.Imported"); + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) + .expect("1.Imported"); assert_eq!(pool.status().ready, 1); let header = pool.api().push_block(3, vec![from_charlie.clone()], true); @@ -560,17 +532,13 @@ fn fork_aware_finalization() { // block D1 { let xt = uxt(Eve, 0); - let w = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. Imported"); + let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. Imported"); assert_eq!(pool.status().ready, 3); let header = pool.api().push_block(4, vec![xt.clone()], true); canon_watchers.push((w, header.hash())); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d1 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 2); @@ -584,16 +552,12 @@ fn fork_aware_finalization() { { let header = pool.api().push_block(5, vec![from_dave, from_bob], true); e1 = header.hash(); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); } - for (canon_watcher, h) in canon_watchers { let mut stream = futures::executor::block_on_stream(canon_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -602,7 +566,6 @@ fn fork_aware_finalization() { assert_eq!(stream.next(), None); } - { let mut stream = futures::executor::block_on_stream(from_dave_watcher); assert_eq!(stream.next(), Some(TransactionStatus::Ready)); @@ -639,19 +602,15 @@ fn prune_and_retract_tx_at_same_time() { let from_alice = uxt(Alice, 1); pool.api().increment_nonce(Alice.into()); - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) - ).expect("1. Imported"); + let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); // Block B1 let b1 = { let header = pool.api().push_block(2, vec![from_alice.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); header.hash() @@ -683,7 +642,6 @@ fn prune_and_retract_tx_at_same_time() { } } - /// This test ensures that transactions from a fork are re-submitted if /// the forked block is not part of the retracted blocks. 
This happens as the /// retracted block list only contains the route from the old best to the new @@ -716,16 +674,12 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); let header = pool.api().push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBestBlock { - hash: header.hash(), - tree_route: None, - }; + let event = ChainEvent::NewBestBlock { hash: header.hash(), tree_route: None }; d0 = header.hash(); block_on(pool.maintain(event)); assert_eq!(pool.status().ready, 0); @@ -733,9 +687,8 @@ fn resubmit_tx_of_fork_that_is_not_part_of_retracted() { // Block D1 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); pool.api().push_block(2, vec![tx1.clone()], false); assert_eq!(pool.status().ready, 1); } @@ -775,9 +728,8 @@ fn resubmit_from_retracted_fork() { // Block D0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx0.clone())) + .expect("1. Imported"); let header = pool.api().push_block(2, vec![tx0.clone()], true); assert_eq!(pool.status().ready, 1); @@ -787,9 +739,8 @@ fn resubmit_from_retracted_fork() { // Block E0 { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx1.clone())) + .expect("1. Imported"); let header = pool.api().push_block(3, vec![tx1.clone()], true); block_on(pool.maintain(block_event(header))); assert_eq!(pool.status().ready, 0); @@ -797,9 +748,8 @@ fn resubmit_from_retracted_fork() { // Block F0 let f0 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx2.clone())) + .expect("1. Imported"); let header = pool.api().push_block(4, vec![tx2.clone()], true); block_on(pool.maintain(block_event(header.clone()))); assert_eq!(pool.status().ready, 0); @@ -808,9 +758,8 @@ fn resubmit_from_retracted_fork() { // Block D1 let d1 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx3.clone())) + .expect("1. Imported"); let header = pool.api().push_block(2, vec![tx3.clone()], true); assert_eq!(pool.status().ready, 1); header.hash() @@ -818,9 +767,8 @@ fn resubmit_from_retracted_fork() { // Block E1 let e1 = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone()) - ).expect("1. Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx4.clone())) + .expect("1. Imported"); let header = pool.api().push_block_with_parent(d1.clone(), vec![tx4.clone()], true); assert_eq!(pool.status().ready, 2); header.hash() @@ -828,9 +776,8 @@ fn resubmit_from_retracted_fork() { // Block F1 let f1_header = { - let _ = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone()) - ).expect("1. 
Imported"); + let _ = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, tx5.clone())) + .expect("1. Imported"); let header = pool.api().push_block_with_parent(e1.clone(), vec![tx5.clone()], true); // Don't announce the block event to the pool directly, because we will // re-org to this block. @@ -892,14 +839,14 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { block_on(pool.maintain(block_event(header))); - match ready_set_future.poll_unpin(&mut context) { + match ready_set_future.poll_unpin(&mut context) { Poll::Pending => { panic!("Ready set should become ready after block update!"); }, Poll::Ready(iterator) => { let data = iterator.collect::>(); assert_eq!(data.len(), 1); - } + }, } } @@ -914,22 +861,22 @@ fn should_not_accept_old_signatures() { client, None, &sp_core::testing::TaskExecutor::new(), - ))).0 + ))) + .0, ); - let transfer = Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 0, - amount: 1, - }; + let transfer = Transfer { from: Alice.into(), to: Bob.into(), nonce: 0, amount: 1 }; let _bytes: sp_core::sr25519::Signature = transfer.using_encoded(|e| Alice.sign(e)).into(); // generated with schnorrkel 0.1.1 from `_bytes` - let old_singature = sp_core::sr25519::Signature::try_from(&hex::decode( - "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426\ - cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108" - ).expect("hex invalid")[..]).expect("signature construction failed"); + let old_singature = sp_core::sr25519::Signature::try_from( + &hex::decode( + "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426\ + cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108", + ) + .expect("hex invalid")[..], + ) + .expect("signature construction failed"); let xt = Extrinsic::Transfer { transfer, @@ -939,9 +886,9 @@ fn should_not_accept_old_signatures() { assert_matches::assert_matches!( block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())), - Err(error::Error::Pool( - sc_transaction_pool_api::error::Error::InvalidTransaction(InvalidTransaction::BadProof) - )), + Err(error::Error::Pool(sc_transaction_pool_api::error::Error::InvalidTransaction( + InvalidTransaction::BadProof + ))), "Should be invalid transaction with bad proof", ); } @@ -955,7 +902,8 @@ fn import_notification_to_pool_maintain_works() { client.clone(), None, &sp_core::testing::TaskExecutor::new(), - ))).0 + ))) + .0, ); // Prepare the extrisic, push it to the pool and check that it was added. @@ -1021,32 +969,16 @@ fn stale_transactions_are_pruned() { // Our initial transactions let xts = vec![ - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 1, - amount: 1, - }, - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 2, - amount: 1, - }, - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 3, - amount: 1, - }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 1, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 2, amount: 1 }, + Transfer { from: Alice.into(), to: Bob.into(), nonce: 3, amount: 1 }, ]; let (pool, _guard, _notifier) = maintained_pool(); xts.into_iter().for_each(|xt| { - block_on( - pool.submit_one(&BlockId::number(0), SOURCE, xt.into_signed_tx()), - ).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.into_signed_tx())) + .expect("1. 
Imported"); }); assert_eq!(pool.status().ready, 0); assert_eq!(pool.status().future, 3); @@ -1054,24 +986,9 @@ fn stale_transactions_are_pruned() { // Almost the same as our initial transactions, but with some different `amount`s to make them // generate a different hash let xts = vec![ - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 1, - amount: 2, - }.into_signed_tx(), - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 2, - amount: 2, - }.into_signed_tx(), - Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 3, - amount: 2, - }.into_signed_tx(), + Transfer { from: Alice.into(), to: Bob.into(), nonce: 1, amount: 2 }.into_signed_tx(), + Transfer { from: Alice.into(), to: Bob.into(), nonce: 2, amount: 2 }.into_signed_tx(), + Transfer { from: Alice.into(), to: Bob.into(), nonce: 3, amount: 2 }.into_signed_tx(), ]; // Import block diff --git a/substrate/client/transaction-pool/tests/revalidation.rs b/substrate/client/transaction-pool/tests/revalidation.rs index d720f09a7fce516e0c8ee2116220c763d7bdb7c2..b2c8225b78f58759d2b139e2d0ca06ea9840bf47 100644 --- a/substrate/client/transaction-pool/tests/revalidation.rs +++ b/substrate/client/transaction-pool/tests/revalidation.rs @@ -1,32 +1,32 @@ +use futures::executor::block_on; use sc_transaction_pool::test_helpers::{Pool, RevalidationQueue}; use sc_transaction_pool_api::TransactionSource; -use substrate_test_runtime_transaction_pool::{TestApi, uxt}; -use futures::executor::block_on; -use substrate_test_runtime_client::AccountKeyring::*; -use std::sync::Arc; use sp_runtime::generic::BlockId; +use std::sync::Arc; +use substrate_test_runtime_client::AccountKeyring::*; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; fn setup() -> (Arc, Pool) { - let test_api = Arc::new(TestApi::empty()); - let pool = Pool::new(Default::default(), true.into(), test_api.clone()); - (test_api, pool) + let test_api = Arc::new(TestApi::empty()); + let pool = Pool::new(Default::default(), true.into(), test_api.clone()); + (test_api, pool) } #[test] fn smoky() { - let (api, pool) = setup(); - let pool = Arc::new(pool); - let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); + let (api, pool) = setup(); + let pool = Arc::new(pool); + let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); - let uxt = uxt(Alice, 0); - let uxt_hash = block_on( - pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone()) - ).expect("Should be valid"); + let uxt = uxt(Alice, 0); + let uxt_hash = + block_on(pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone())) + .expect("Should be valid"); - block_on(queue.revalidate_later(0, vec![uxt_hash])); + block_on(queue.revalidate_later(0, vec![uxt_hash])); - // revalidated in sync offload 2nd time - assert_eq!(api.validation_requests().len(), 2); - // number of ready - assert_eq!(pool.validated_pool().status().ready, 1); -} \ No newline at end of file + // revalidated in sync offload 2nd time + assert_eq!(api.validation_requests().len(), 2); + // number of ready + assert_eq!(pool.validated_pool().status().ready, 1); +} diff --git a/substrate/frame/assets/src/benchmarking.rs b/substrate/frame/assets/src/benchmarking.rs index c6925df9ad88f01a6b9da132df055c087ec8893c..89a1308db1712513b5eb7d5105c32d4f397e6869 100644 --- a/substrate/frame/assets/src/benchmarking.rs +++ b/substrate/frame/assets/src/benchmarking.rs @@ -19,23 +19,26 @@ #![cfg(feature = "runtime-benchmarks")] -use sp_std::prelude::*; use super::*; -use 
sp_runtime::traits::Bounded; -use frame_system::RawOrigin as SystemOrigin; use frame_benchmarking::{ - benchmarks_instance_pallet, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, + whitelisted_caller, +}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, }; -use frame_support::traits::Get; -use frame_support::{traits::EnsureOrigin, dispatch::UnfilteredDispatchable}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::Bounded; +use sp_std::prelude::*; use crate::Pallet as Assets; const SEED: u32 = 0; -fn create_default_asset<T: Config<I>, I: 'static>(is_sufficient: bool) - -> (T::AccountId, <T::Lookup as StaticLookup>::Source) -{ +fn create_default_asset<T: Config<I>, I: 'static>( + is_sufficient: bool, +) -> (T::AccountId, <T::Lookup as StaticLookup>::Source) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let root = SystemOrigin::Root.into(); @@ -45,14 +48,16 @@ fn create_default_asset<T: Config<I>, I: 'static>(is_sufficient: bool) caller_lookup.clone(), is_sufficient, 1u32.into(), - ).is_ok()); + ) + .is_ok()); (caller, caller_lookup) } -fn create_default_minted_asset<T: Config<I>, I: 'static>(is_sufficient: bool, amount: T::Balance) - -> (T::AccountId, <T::Lookup as StaticLookup>::Source) -{ - let (caller, caller_lookup) = create_default_asset::<T, I>(is_sufficient); +fn create_default_minted_asset<T: Config<I>, I: 'static>( + is_sufficient: bool, + amount: T::Balance, +) -> (T::AccountId, <T::Lookup as StaticLookup>::Source) { + let (caller, caller_lookup) = create_default_asset::<T, I>(is_sufficient); if !is_sufficient { T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); } @@ -61,14 +66,17 @@ fn create_default_minted_asset<T: Config<I>, I: 'static>(is_sufficient: bool, am Default::default(), caller_lookup.clone(), amount, - ).is_ok()); + ) + .is_ok()); (caller, caller_lookup) } fn swap_is_sufficient<T: Config<I>, I: 'static>(s: &mut bool) { - Asset::<T, I>::mutate(&T::AssetId::default(), |maybe_a| - if let Some(ref mut a) = maybe_a { sp_std::mem::swap(s, &mut a.is_sufficient) } - ); + Asset::<T, I>::mutate(&T::AssetId::default(), |maybe_a| { + if let Some(ref mut a) = maybe_a { + sp_std::mem::swap(s, &mut a.is_sufficient) + } + }); } fn add_consumers<T: Config<I>, I: 'static>(minter: T::AccountId, n: u32) { @@ -79,7 +87,13 @@ fn add_consumers<T: Config<I>, I: 'static>(minter: T::AccountId, n: u32) { let target = account("consumer", i, SEED); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let target_lookup = T::Lookup::unlookup(target); - assert!(Assets::<T, I>::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + assert!(Assets::<T, I>::mint( + origin.clone().into(), + Default::default(), + target_lookup, + 100u32.into() + ) + .is_ok()); } swap_is_sufficient::<T, I>(&mut s); } @@ -91,7 +105,13 @@ fn add_sufficients<T: Config<I>, I: 'static>(minter: T::AccountId, n: u32) { for i in 0..n { let target = account("sufficient", i, SEED); let target_lookup = T::Lookup::unlookup(target); - assert!(Assets::<T, I>::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + assert!(Assets::<T, I>::mint( + origin.clone().into(), + Default::default(), + target_lookup, + 100u32.into() + ) + .is_ok()); } swap_is_sufficient::<T, I>(&mut s); } @@ -105,7 +125,8 @@ fn add_approvals<T: Config<I>, I: 'static>(minter: T::AccountId, n: u32) { Default::default(), minter_lookup, (100 * (n + 1)).into(), - ).unwrap(); + ) + .unwrap(); for i in 0..n { let target = account("approval", i, SEED); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); @@ 
-115,7 +136,8 @@ fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { Default::default(), target_lookup, 100u32.into(), - ).unwrap(); + ) + .unwrap(); } } diff --git a/substrate/frame/assets/src/extra_mutator.rs b/substrate/frame/assets/src/extra_mutator.rs index d86d78ce3e376697d9f8cfdfa3086c714b9b7eee..8c601b746346cf4ef36ae1968a2eab1827e38121 100644 --- a/substrate/frame/assets/src/extra_mutator.rs +++ b/substrate/frame/assets/src/extra_mutator.rs @@ -34,10 +34,7 @@ pub struct ExtraMutator, I: 'static = ()> { impl, I: 'static> Drop for ExtraMutator { fn drop(&mut self) { - debug_assert!( - self.commit().is_ok(), - "attempt to write to non-existent asset account" - ); + debug_assert!(self.commit().is_ok(), "attempt to write to non-existent asset account"); } } diff --git a/substrate/frame/assets/src/functions.rs b/substrate/frame/assets/src/functions.rs index c6b5391cff8608936a96d905827a6cea50ed9fc3..6e6847ad7dfb6f627f4ed24b37d37aca7ab3cdab 100644 --- a/substrate/frame/assets/src/functions.rs +++ b/substrate/frame/assets/src/functions.rs @@ -38,9 +38,7 @@ impl, I: 'static> Pallet { /// Get the total supply of an asset `id`. pub fn total_supply(id: T::AssetId) -> T::Balance { - Asset::::get(id) - .map(|x| x.supply) - .unwrap_or_else(Zero::zero) + Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) } pub(super) fn new_account( @@ -134,7 +132,7 @@ impl, I: 'static> Pallet { match frozen.checked_add(&details.min_balance) { Some(required) if rest < required => return Frozen, None => return Overflow, - _ => {} + _ => {}, } } @@ -171,9 +169,8 @@ impl, I: 'static> Pallet { let amount = if let Some(frozen) = T::Freezer::frozen_balance(id, who) { // Frozen balance: account CANNOT be deleted - let required = frozen - .checked_add(&details.min_balance) - .ok_or(ArithmeticError::Overflow)?; + let required = + frozen.checked_add(&details.min_balance).ok_or(ArithmeticError::Overflow)?; account.balance.saturating_sub(required) } else { let is_provider = false; @@ -219,7 +216,7 @@ impl, I: 'static> Pallet { Err(e) => { debug_assert!(false, "passed from reducible_balance; qed"); return Err(e.into()) - } + }, }; Ok(actual) @@ -268,12 +265,12 @@ impl, I: 'static> Pallet { ) -> DispatchResult { Self::increase_balance(id, beneficiary, amount, |details| -> DispatchResult { if let Some(check_issuer) = maybe_check_issuer { - ensure!( - &check_issuer == &details.issuer, - Error::::NoPermission - ); + ensure!(&check_issuer == &details.issuer, Error::::NoPermission); } - debug_assert!(T::Balance::max_value() - details.supply >= amount, "checked in prep; qed"); + debug_assert!( + T::Balance::max_value() - details.supply >= amount, + "checked in prep; qed" + ); details.supply = details.supply.saturating_add(amount); Ok(()) })?; @@ -295,7 +292,9 @@ impl, I: 'static> Pallet { &mut AssetDetails>, ) -> DispatchResult, ) -> DispatchResult { - if amount.is_zero() { return Ok(()) } + if amount.is_zero() { + return Ok(()) + } Self::can_increase(id, beneficiary, amount).into_result()?; Asset::::try_mutate(id, |maybe_details| -> DispatchResult { @@ -364,7 +363,9 @@ impl, I: 'static> Pallet { &mut AssetDetails>, ) -> DispatchResult, ) -> Result { - if amount.is_zero() { return Ok(amount) } + if amount.is_zero() { + return Ok(amount) + } let actual = Self::prep_debit(id, target, amount, f)?; diff --git a/substrate/frame/assets/src/impl_fungibles.rs b/substrate/frame/assets/src/impl_fungibles.rs index 71951bae1116587fbc019495c634356e833b1c30..4e85b20a1fbb17467190bed9d21e8e5860b7c36d 100644 --- 
a/substrate/frame/assets/src/impl_fungibles.rs +++ b/substrate/frame/assets/src/impl_fungibles.rs @@ -24,15 +24,11 @@ impl, I: 'static> fungibles::Inspect<::AccountId type Balance = T::Balance; fn total_issuance(asset: Self::AssetId) -> Self::Balance { - Asset::::get(asset) - .map(|x| x.supply) - .unwrap_or_else(Zero::zero) + Asset::::get(asset).map(|x| x.supply).unwrap_or_else(Zero::zero) } fn minimum_balance(asset: Self::AssetId) -> Self::Balance { - Asset::::get(asset) - .map(|x| x.min_balance) - .unwrap_or_else(Zero::zero) + Asset::::get(asset).map(|x| x.min_balance).unwrap_or_else(Zero::zero) } fn balance(asset: Self::AssetId, who: &::AccountId) -> Self::Balance { @@ -78,10 +74,7 @@ impl, I: 'static> fungibles::Mutate<::AccountId> who: &::AccountId, amount: Self::Balance, ) -> Result { - let f = DebitFlags { - keep_alive: false, - best_effort: false, - }; + let f = DebitFlags { keep_alive: false, best_effort: false }; Self::do_burn(asset, who, amount, None, f) } @@ -90,10 +83,7 @@ impl, I: 'static> fungibles::Mutate<::AccountId> who: &::AccountId, amount: Self::Balance, ) -> Result { - let f = DebitFlags { - keep_alive: false, - best_effort: true, - }; + let f = DebitFlags { keep_alive: false, best_effort: true }; Self::do_burn(asset, who, amount, None, f) } } @@ -106,11 +96,7 @@ impl, I: 'static> fungibles::Transfer for Pallet Result { - let f = TransferFlags { - keep_alive, - best_effort: false, - burn_dust: false - }; + let f = TransferFlags { keep_alive, best_effort: false, burn_dust: false }; Self::do_transfer(asset, source, dest, amount, None, f) } } @@ -126,28 +112,35 @@ impl, I: 'static> fungibles::Unbalanced for Pallet Result - { + fn decrease_balance( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { let f = DebitFlags { keep_alive: false, best_effort: false }; Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) } - fn decrease_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) - -> Self::Balance - { + fn decrease_balance_at_most( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Self::Balance { let f = DebitFlags { keep_alive: false, best_effort: true }; - Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) - .unwrap_or(Zero::zero()) + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())).unwrap_or(Zero::zero()) } - fn increase_balance(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) - -> Result - { + fn increase_balance( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { Self::increase_balance(asset, who, amount, |_| Ok(()))?; Ok(amount) } - fn increase_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) - -> Self::Balance - { + fn increase_balance_at_most( + asset: T::AssetId, + who: &T::AccountId, + amount: Self::Balance, + ) -> Self::Balance { match Self::increase_balance(asset, who, amount, |_| Ok(())) { Ok(()) => amount, Err(_) => Zero::zero(), diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 5fe167df3f444141ae2a9f22c37a8c9e808d6dd0..65878672c9a7f17eadc5b0655c9302277141ac97 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -122,40 +122,49 @@ // Ensure we're `no_std` when compiling for Wasm. 
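A note on the `fungibles::*` impls just reformatted above: they are what lets other runtime code handle pallet-assets balances generically, keyed by an asset id. A minimal standalone sketch of that shape, using toy types (u32/u64/u128) rather than the real `frame_support` traits:

use std::collections::HashMap;

// Toy stand-in for `fungibles::Inspect`: per-asset balances and minimums.
trait Inspect {
    fn balance(&self, asset: u32, who: u64) -> u128;
    fn minimum_balance(&self, asset: u32) -> u128;
}

struct ToyAssets {
    balances: HashMap<(u32, u64), u128>,
    min_balance: HashMap<u32, u128>,
}

impl Inspect for ToyAssets {
    fn balance(&self, asset: u32, who: u64) -> u128 {
        // Like `Asset::<T, I>::get(..)`: a missing entry reads as zero.
        *self.balances.get(&(asset, who)).unwrap_or(&0)
    }
    fn minimum_balance(&self, asset: u32) -> u128 {
        *self.min_balance.get(&asset).unwrap_or(&0)
    }
}

fn main() {
    let assets = ToyAssets { balances: HashMap::new(), min_balance: HashMap::new() };
    assert_eq!(assets.balance(0, 1), 0);
}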
diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs
index 5fe167df3f444141ae2a9f22c37a8c9e808d6dd0..65878672c9a7f17eadc5b0655c9302277141ac97 100644
--- a/substrate/frame/assets/src/lib.rs
+++ b/substrate/frame/assets/src/lib.rs
@@ -122,40 +122,49 @@
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
 
-pub mod weights;
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
 #[cfg(test)]
 pub mod mock;
 #[cfg(test)]
 mod tests;
+pub mod weights;
 
 mod extra_mutator;
 pub use extra_mutator::*;
-mod impl_stored_map;
-mod impl_fungibles;
 mod functions;
+mod impl_fungibles;
+mod impl_stored_map;
 mod types;
 pub use types::*;
 
-use sp_std::{prelude::*, borrow::Borrow, convert::TryInto};
-use sp_runtime::{
-	TokenError, ArithmeticError,
-	traits::{AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, Bounded}
-};
 use codec::HasCompact;
-use frame_support::pallet_prelude::*;
-use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved, StoredMap};
-use frame_support::traits::tokens::{WithdrawConsequence, DepositConsequence, fungibles};
+use frame_support::{
+	dispatch::{DispatchError, DispatchResult},
+	ensure,
+	traits::{
+		tokens::{fungibles, DepositConsequence, WithdrawConsequence},
+		BalanceStatus::Reserved,
+		Currency, ReservableCurrency, StoredMap,
+	},
+};
 use frame_system::Config as SystemConfig;
+use sp_runtime::{
+	traits::{
+		AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, Saturating, StaticLookup, Zero,
+	},
+	ArithmeticError, TokenError,
+};
+use sp_std::{borrow::Borrow, convert::TryInto, prelude::*};
 
-pub use weights::WeightInfo;
 pub use pallet::*;
+pub use weights::WeightInfo;
 
 #[frame_support::pallet]
 pub mod pallet {
-	use frame_system::pallet_prelude::*;
 	use super::*;
+	use frame_support::{dispatch::DispatchResult, pallet_prelude::*};
+	use frame_system::pallet_prelude::*;
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
@@ -267,11 +276,7 @@ pub mod pallet {
 
 	#[pallet::event]
 	#[pallet::generate_deposit(pub(super) fn deposit_event)]
-	#[pallet::metadata(
-		T::AccountId = "AccountId",
-		T::Balance = "Balance",
-		T::AssetId = "AssetId"
-	)]
+	#[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance", T::AssetId = "AssetId")]
 	pub enum Event<T: Config<I>, I: 'static = ()> {
 		/// Some asset class was created. \[asset_id, creator, owner\]
 		Created(T::AssetId, T::AccountId, T::AccountId),
@@ -514,13 +519,12 @@ pub mod pallet {
 			}
 
 			Self::deposit_event(Event::Destroyed(id));
-			Ok(
-				Some(T::WeightInfo::destroy(
-					details.accounts.saturating_sub(details.sufficients),
-					details.sufficients,
-					details.approvals,
-				)).into()
-			)
+			Ok(Some(T::WeightInfo::destroy(
+				details.accounts.saturating_sub(details.sufficients),
+				details.sufficients,
+				details.approvals,
+			))
+			.into())
 		})
 	}
 
@@ -541,7 +545,7 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
 			beneficiary: <T::Lookup as StaticLookup>::Source,
-			#[pallet::compact] amount: T::Balance
+			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 			let beneficiary = T::Lookup::lookup(beneficiary)?;
@@ -569,7 +573,7 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
 			who: <T::Lookup as StaticLookup>::Source,
-			#[pallet::compact] amount: T::Balance
+			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 			let who = T::Lookup::lookup(who)?;
@@ -602,16 +606,12 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
 			target: <T::Lookup as StaticLookup>::Source,
-			#[pallet::compact] amount: T::Balance
+			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 			let dest = T::Lookup::lookup(target)?;
 
-			let f = TransferFlags {
-				keep_alive: false,
-				best_effort: false,
-				burn_dust: false
-			};
+			let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false };
 			Self::do_transfer(id, &origin, &dest, amount, None, f).map(|_| ())
 		}
 
@@ -638,16 +638,12 @@ pub mod pallet {
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
 			target: <T::Lookup as StaticLookup>::Source,
-			#[pallet::compact] amount: T::Balance
+			#[pallet::compact] amount: T::Balance,
 		) -> DispatchResult {
 			let source = ensure_signed(origin)?;
 			let dest = T::Lookup::lookup(target)?;
 
-			let f = TransferFlags {
-				keep_alive: true,
-				best_effort: false,
-				burn_dust: false
-			};
+			let f = TransferFlags { keep_alive: true, best_effort: false, burn_dust: false };
 			Self::do_transfer(id, &source, &dest, amount, None, f).map(|_| ())
 		}
 
@@ -682,11 +678,7 @@ pub mod pallet {
 			let source = T::Lookup::lookup(source)?;
 			let dest = T::Lookup::lookup(dest)?;
 
-			let f = TransferFlags {
-				keep_alive: false,
-				best_effort: false,
-				burn_dust: false
-			};
+			let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false };
 			Self::do_transfer(id, &source, &dest, amount, Some(origin), f).map(|_| ())
 		}
 
@@ -704,17 +696,14 @@ pub mod pallet {
 		pub fn freeze(
 			origin: OriginFor<T>,
 			#[pallet::compact] id: T::AssetId,
-			who: <T::Lookup as StaticLookup>::Source
+			who: <T::Lookup as StaticLookup>::Source,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 
 			let d = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(&origin == &d.freezer, Error::<T, I>::NoPermission);
 			let who = T::Lookup::lookup(who)?;
-			ensure!(
-				Account::<T, I>::contains_key(id, &who),
-				Error::<T, I>::BalanceZero
-			);
+			ensure!(Account::<T, I>::contains_key(id, &who), Error::<T, I>::BalanceZero);
 
 			Account::<T, I>::mutate(id, &who, |a| a.is_frozen = true);
 
@@ -735,19 +724,15 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::thaw())]
 		pub fn thaw(
 			origin: OriginFor<T>,
-			#[pallet::compact]
-			id: T::AssetId,
-			who: <T::Lookup as StaticLookup>::Source
+			#[pallet::compact] id: T::AssetId,
+			who: <T::Lookup as StaticLookup>::Source,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 
 			let details = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(&origin == &details.admin, Error::<T, I>::NoPermission);
 			let who = T::Lookup::lookup(who)?;
-			ensure!(
-				Account::<T, I>::contains_key(id, &who),
-				Error::<T, I>::BalanceZero
-			);
+			ensure!(Account::<T, I>::contains_key(id, &who), Error::<T, I>::BalanceZero);
 
 			Account::<T, I>::mutate(id, &who, |a| a.is_frozen = false);
 
@@ -767,7 +752,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::freeze_asset())]
 		pub fn freeze_asset(
 			origin: OriginFor<T>,
-			#[pallet::compact] id: T::AssetId
+			#[pallet::compact] id: T::AssetId,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 
@@ -794,7 +779,7 @@ pub mod pallet {
 		#[pallet::weight(T::WeightInfo::thaw_asset())]
 		pub fn thaw_asset(
 			origin: OriginFor<T>,
-			#[pallet::compact] id: T::AssetId
+			#[pallet::compact] id: T::AssetId,
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 
@@ -832,7 +817,7 @@ pub mod pallet {
 			let details = maybe_details.as_mut().ok_or(Error::<T, I>::Unknown)?;
 			ensure!(&origin == &details.owner, Error::<T, I>::NoPermission);
 			if details.owner == owner {
-				return Ok(());
+				return Ok(())
 			}
 
 			let metadata_deposit = Metadata::<T, I>::get(id).deposit;
@@ -912,14 +897,10 @@ pub mod pallet {
 		) -> DispatchResult {
 			let origin = ensure_signed(origin)?;
 
-			let bounded_name: BoundedVec<u8, T::StringLimit> = name
-				.clone()
-				.try_into()
-				.map_err(|_| Error::<T, I>::BadMetadata)?;
-			let bounded_symbol: BoundedVec<u8, T::StringLimit> = symbol
-				.clone()
-				.try_into()
-				.map_err(|_| Error::<T, I>::BadMetadata)?;
+			let bounded_name: BoundedVec<u8, T::StringLimit> =
+				name.clone().try_into().map_err(|_| Error::<T, I>::BadMetadata)?;
+			let bounded_symbol: BoundedVec<u8, T::StringLimit> =
+				symbol.clone().try_into().map_err(|_| Error::<T, I>::BadMetadata)?;
 
 			let d = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(&origin == &d.owner, Error::<T, I>::NoPermission);
@@ -1008,15 +989,11 @@ pub mod pallet {
 		) -> DispatchResult {
 			T::ForceOrigin::ensure_origin(origin)?;
 
-			let bounded_name: BoundedVec<u8, T::StringLimit> = name
-				.clone()
-				.try_into()
-				.map_err(|_| Error::<T, I>::BadMetadata)?;
+			let bounded_name: BoundedVec<u8, T::StringLimit> =
+				name.clone().try_into().map_err(|_| Error::<T, I>::BadMetadata)?;
 
-			let bounded_symbol: BoundedVec<u8, T::StringLimit> = symbol
-				.clone()
-				.try_into()
-				.map_err(|_| Error::<T, I>::BadMetadata)?;
+			let bounded_symbol: BoundedVec<u8, T::StringLimit> =
+				symbol.clone().try_into().map_err(|_| Error::<T, I>::BadMetadata)?;
 
 			ensure!(Asset::<T, I>::contains_key(id), Error::<T, I>::Unknown);
 			Metadata::<T, I>::try_mutate_exists(id, |metadata| {
@@ -1145,25 +1122,28 @@ pub mod pallet {
 			let mut d = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
 			ensure!(!d.is_frozen, Error::<T, I>::Frozen);
 
-			Approvals::<T, I>::try_mutate((id, &owner, &delegate), |maybe_approved| -> DispatchResult {
-				let mut approved = match maybe_approved.take() {
-					// an approval already exists and is being updated
-					Some(a) => a,
-					// a new approval is created
-					None => {
-						d.approvals.saturating_inc();
-						Default::default()
+			Approvals::<T, I>::try_mutate(
+				(id, &owner, &delegate),
+				|maybe_approved| -> DispatchResult {
+					let mut approved = match maybe_approved.take() {
+						// an approval already exists and is being updated
+						Some(a) => a,
+						// a new approval is created
+						None => {
+							d.approvals.saturating_inc();
+							Default::default()
+						},
+					};
+					let deposit_required = T::ApprovalDeposit::get();
+					if approved.deposit < deposit_required {
+						T::Currency::reserve(&owner, deposit_required - approved.deposit)?;
+						approved.deposit = deposit_required;
 					}
-				};
-				let deposit_required = T::ApprovalDeposit::get();
-				if approved.deposit < deposit_required {
-					T::Currency::reserve(&owner, deposit_required - approved.deposit)?;
-					approved.deposit = deposit_required;
-				}
-				approved.amount = approved.amount.saturating_add(amount);
-				*maybe_approved = Some(approved);
-				Ok(())
-			})?;
+					approved.amount = approved.amount.saturating_add(amount);
+					*maybe_approved = Some(approved);
+					Ok(())
+				},
+			)?;
 			Asset::<T, I>::insert(id, d);
 			Self::deposit_event(Event::ApprovedTransfer(id, owner, delegate, amount));
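The `approve_transfer` body reflowed above encodes one small rule worth isolating: when the required approval deposit has grown, only the shortfall is newly reserved. A standalone toy model of that top-up (plain `u128`s, no error paths; names hypothetical):

// Toy model of the deposit top-up inside `approve_transfer`.
fn top_up_deposit(current_deposit: &mut u128, required: u128, reserved: &mut u128) {
    if *current_deposit < required {
        // Reserve only the difference, exactly as the hunk above does.
        *reserved += required - *current_deposit;
        *current_deposit = required;
    }
}

fn main() {
    let (mut deposit, mut reserved) = (3u128, 3u128);
    top_up_deposit(&mut deposit, 5, &mut reserved);
    assert_eq!((deposit, reserved), (5, 5)); // only 2 extra units were reserved
}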
@@ -1192,7 +1172,8 @@ pub mod pallet {
 			let owner = ensure_signed(origin)?;
 			let delegate = T::Lookup::lookup(delegate)?;
 			let mut d = Asset::<T, I>::get(id).ok_or(Error::<T, I>::Unknown)?;
-			let approval = Approvals::<T, I>::take((id, &owner, &delegate)).ok_or(Error::<T, I>::Unknown)?;
+			let approval =
+				Approvals::<T, I>::take((id, &owner, &delegate)).ok_or(Error::<T, I>::Unknown)?;
 			T::Currency::unreserve(&owner, approval.deposit);
 
 			d.approvals.saturating_dec();
@@ -1234,7 +1215,8 @@ pub mod pallet {
 			let owner = T::Lookup::lookup(owner)?;
 			let delegate = T::Lookup::lookup(delegate)?;
 
-			let approval = Approvals::<T, I>::take((id, &owner, &delegate)).ok_or(Error::<T, I>::Unknown)?;
+			let approval =
+				Approvals::<T, I>::take((id, &owner, &delegate)).ok_or(Error::<T, I>::Unknown)?;
 			T::Currency::unreserve(&owner, approval.deposit);
 			d.approvals.saturating_dec();
 			Asset::<T, I>::insert(id, d);
@@ -1273,33 +1255,31 @@ pub mod pallet {
 			let owner = T::Lookup::lookup(owner)?;
 			let destination = T::Lookup::lookup(destination)?;
 
-			Approvals::<T, I>::try_mutate_exists((id, &owner, delegate), |maybe_approved| -> DispatchResult {
-				let mut approved = maybe_approved.take().ok_or(Error::<T, I>::Unapproved)?;
-				let remaining = approved
-					.amount
-					.checked_sub(&amount)
-					.ok_or(Error::<T, I>::Unapproved)?;
-
-				let f = TransferFlags {
-					keep_alive: false,
-					best_effort: false,
-					burn_dust: false
-				};
-				Self::do_transfer(id, &owner, &destination, amount, None, f)?;
-
-				if remaining.is_zero() {
-					T::Currency::unreserve(&owner, approved.deposit);
-					Asset::<T, I>::mutate(id, |maybe_details| {
-						if let Some(details) = maybe_details {
-							details.approvals.saturating_dec();
-						}
-					});
-				} else {
-					approved.amount = remaining;
-					*maybe_approved = Some(approved);
-				}
-				Ok(())
-			})?;
+			Approvals::<T, I>::try_mutate_exists(
+				(id, &owner, delegate),
+				|maybe_approved| -> DispatchResult {
+					let mut approved = maybe_approved.take().ok_or(Error::<T, I>::Unapproved)?;
+					let remaining =
+						approved.amount.checked_sub(&amount).ok_or(Error::<T, I>::Unapproved)?;
+
+					let f =
+						TransferFlags { keep_alive: false, best_effort: false, burn_dust: false };
+					Self::do_transfer(id, &owner, &destination, amount, None, f)?;
+
+					if remaining.is_zero() {
+						T::Currency::unreserve(&owner, approved.deposit);
+						Asset::<T, I>::mutate(id, |maybe_details| {
+							if let Some(details) = maybe_details {
+								details.approvals.saturating_dec();
+							}
+						});
+					} else {
+						approved.amount = remaining;
+						*maybe_approved = Some(approved);
+					}
+					Ok(())
+				},
+			)?;
 			Ok(())
 		}
 	}
diff --git a/substrate/frame/assets/src/mock.rs b/substrate/frame/assets/src/mock.rs
index 429548a5d1c28a24cbd58acb904911d6cf9a04c1..e4f5763f149fe6297f35f5769b5dd9198f68e325 100644
--- a/substrate/frame/assets/src/mock.rs
+++ b/substrate/frame/assets/src/mock.rs
@@ -20,9 +20,12 @@
 use super::*;
 use crate as pallet_assets;
 
+use frame_support::{construct_runtime, parameter_types};
 use sp_core::H256;
-use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header};
-use frame_support::{parameter_types, construct_runtime};
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+};
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -108,8 +111,7 @@ impl Config for Test {
 	type Extra = ();
 }
 
-use std::cell::RefCell;
-use std::collections::HashMap;
+use std::{cell::RefCell, collections::HashMap};
 
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 pub(crate) enum Hook {
diff --git a/substrate/frame/assets/src/tests.rs b/substrate/frame/assets/src/tests.rs
index c2cf9acf29bdb92c3c5bbcab7e89c611a1bda534..aab534a6e4efc4670563cf6be8c74eaf3b0f134b 100644
--- a/substrate/frame/assets/src/tests.rs
+++ b/substrate/frame/assets/src/tests.rs
@@ -18,10 +18,10 @@
 //! Tests for Assets pallet.
 
 use super::*;
-use crate::{Error, mock::*};
-use sp_runtime::{TokenError, traits::ConvertInto};
-use frame_support::{assert_ok, assert_noop, traits::Currency};
+use crate::{mock::*, Error};
+use frame_support::{assert_noop, assert_ok, traits::Currency};
 use pallet_balances::Error as BalancesError;
+use sp_runtime::{traits::ConvertInto, TokenError};
 
 #[test]
 fn basic_minting_should_work() {
@@ -151,13 +151,25 @@ fn force_cancel_approval_works() {
 		assert_eq!(Asset::<Test>::get(0).unwrap().approvals, 1);
 		let e = Error::<Test>::NoPermission;
 		assert_noop!(Assets::force_cancel_approval(Origin::signed(2), 0, 1, 2), e);
-		assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 1, 1, 2), Error::<Test>::Unknown);
-		assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2), Error::<Test>::Unknown);
-		assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3), Error::<Test>::Unknown);
+		assert_noop!(
+			Assets::force_cancel_approval(Origin::signed(1), 1, 1, 2),
+			Error::<Test>::Unknown
+		);
+		assert_noop!(
+			Assets::force_cancel_approval(Origin::signed(1), 0, 2, 2),
+			Error::<Test>::Unknown
+		);
+		assert_noop!(
+			Assets::force_cancel_approval(Origin::signed(1), 0, 1, 3),
+			Error::<Test>::Unknown
+		);
 		assert_eq!(Asset::<Test>::get(0).unwrap().approvals, 1);
 
 		assert_ok!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2));
 		assert_eq!(Asset::<Test>::get(0).unwrap().approvals, 0);
-		assert_noop!(Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2), Error::<Test>::Unknown);
+		assert_noop!(
+			Assets::force_cancel_approval(Origin::signed(1), 0, 1, 2),
+			Error::<Test>::Unknown
+		);
 	});
 }
 
@@ -222,7 +234,6 @@ fn destroy_with_bad_witness_should_not_work() {
 		w.accounts += 2;
 		w.sufficients += 2;
 		assert_ok!(Assets::destroy(Origin::signed(1), 0, w));
-
 	});
 }
 
@@ -259,7 +270,10 @@ fn non_providing_should_work() {
 		// ...or transfer...
 		assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate);
 		// ...or force-transfer
-		assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), TokenError::CannotCreate);
+		assert_noop!(
+			Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50),
+			TokenError::CannotCreate
+		);
 
 		Balances::make_free_balance_be(&1, 100);
 		Balances::make_free_balance_be(&2, 100);
@@ -278,7 +292,10 @@ fn min_balance_should_work() {
 		// Cannot create a new account with a balance that is below minimum...
 		assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum);
 		assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum);
-		assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), TokenError::BelowMinimum);
+		assert_noop!(
+			Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9),
+			TokenError::BelowMinimum
+		);
 
 		// When deducting from an account to below minimum, it should be reaped.
 		assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91));
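The `min_balance_should_work` hunk above pins down the dust rule: an asset balance may never sit strictly between zero and `min_balance`; a debit that lands there reaps the account instead. A standalone toy check of that invariant:

// Toy model of the rule exercised by `min_balance_should_work`.
fn settle(balance: u128, min_balance: u128) -> Option<u128> {
    if balance == 0 || balance >= min_balance {
        Some(balance) // zero means "reaped"; otherwise the account stays
    } else {
        None // would be a dust account; the pallet refuses the credit or reaps on debit
    }
}

fn main() {
    assert_eq!(settle(9, 10), None); // cannot exist below the minimum
    assert_eq!(settle(0, 10), Some(0)); // fully drained accounts are simply removed
}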
@@ -333,7 +350,10 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() {
 		assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10));
 		assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100));
 		assert_eq!(Assets::balance(0, 1), 100);
-		assert_noop!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::<Test>::BalanceLow);
+		assert_noop!(
+			Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91),
+			Error::<Test>::BalanceLow
+		);
 		assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90));
 		assert_eq!(Assets::balance(0, 1), 10);
 		assert_eq!(Assets::balance(0, 2), 90);
@@ -385,13 +405,19 @@ fn origin_guards_should_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
 		assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100));
-		assert_noop!(Assets::transfer_ownership(Origin::signed(2), 0, 2), Error::<Test>::NoPermission);
+		assert_noop!(
+			Assets::transfer_ownership(Origin::signed(2), 0, 2),
+			Error::<Test>::NoPermission
+		);
 		assert_noop!(Assets::set_team(Origin::signed(2), 0, 2, 2, 2), Error::<Test>::NoPermission);
 		assert_noop!(Assets::freeze(Origin::signed(2), 0, 1), Error::<Test>::NoPermission);
 		assert_noop!(Assets::thaw(Origin::signed(2), 0, 2), Error::<Test>::NoPermission);
 		assert_noop!(Assets::mint(Origin::signed(2), 0, 2, 100), Error::<Test>::NoPermission);
 		assert_noop!(Assets::burn(Origin::signed(2), 0, 1, 100), Error::<Test>::NoPermission);
-		assert_noop!(Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100), Error::<Test>::NoPermission);
+		assert_noop!(
+			Assets::force_transfer(Origin::signed(2), 0, 1, 2, 100),
+			Error::<Test>::NoPermission
+		);
 		let w = Asset::<Test>::get(0).unwrap().destroy_witness();
 		assert_noop!(Assets::destroy(Origin::signed(2), 0, w), Error::<Test>::NoPermission);
 	});
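The keep-alive test above fixes the boundary exactly: with a balance of 100 and `min_balance` 10, transferring 91 fails (9 would remain) while 90 succeeds. A toy model of that check:

// Toy model of the keep-alive predicate: the debit must leave at least `min_balance`.
fn keep_alive_ok(balance: u128, amount: u128, min_balance: u128) -> bool {
    balance.checked_sub(amount).map_or(false, |rest| rest >= min_balance)
}

fn main() {
    assert!(!keep_alive_ok(100, 91, 10)); // 9 left: refused
    assert!(keep_alive_ok(100, 90, 10)); // exactly 10 left: allowed
}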
@@ -410,7 +436,10 @@ fn transfer_owner_should_work() {
 		assert_eq!(Balances::reserved_balance(&2), 1);
 		assert_eq!(Balances::reserved_balance(&1), 0);
 
-		assert_noop!(Assets::transfer_ownership(Origin::signed(1), 0, 1), Error::<Test>::NoPermission);
+		assert_noop!(
+			Assets::transfer_ownership(Origin::signed(1), 0, 1),
+			Error::<Test>::NoPermission
+		);
 
 		// Set metadata now and make sure that deposit gets transferred back.
 		assert_ok!(Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12));
@@ -513,25 +542,25 @@ fn set_metadata_should_work() {
 	new_test_ext().execute_with(|| {
 		// Cannot add metadata to unknown asset
 		assert_noop!(
-				Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12),
-				Error::<Test>::Unknown,
-			);
+			Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 10], 12),
+			Error::<Test>::Unknown,
+		);
 		assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
 
 		// Cannot add metadata to unowned asset
 		assert_noop!(
-				Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12),
-				Error::<Test>::NoPermission,
-			);
+			Assets::set_metadata(Origin::signed(2), 0, vec![0u8; 10], vec![0u8; 10], 12),
+			Error::<Test>::NoPermission,
+		);
 
 		// Cannot add oversized metadata
 		assert_noop!(
-				Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12),
-				Error::<Test>::BadMetadata,
-			);
+			Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 100], vec![0u8; 10], 12),
+			Error::<Test>::BadMetadata,
+		);
 		assert_noop!(
-				Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12),
-				Error::<Test>::BadMetadata,
-			);
+			Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 10], vec![0u8; 100], 12),
+			Error::<Test>::BadMetadata,
+		);
 
 		// Successfully add metadata and take deposit
 		Balances::make_free_balance_be(&1, 30);
@@ -546,9 +575,9 @@ fn set_metadata_should_work() {
 
 		// Cannot over-reserve
 		assert_noop!(
-				Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12),
-				BalancesError::<Test, _>::InsufficientBalance,
-			);
+			Assets::set_metadata(Origin::signed(1), 0, vec![0u8; 20], vec![0u8; 20], 12),
+			BalancesError::<Test, _>::InsufficientBalance,
+		);
 
 		// Clear Metadata
 		assert!(Metadata::<Test>::contains_key(0));
@@ -566,7 +595,6 @@ fn freezer_should_work() {
 		assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100));
 		assert_eq!(Assets::balance(0, 1), 100);
 
-
 		// freeze 50 of it.
 		set_frozen_balance(0, 1, 50);
@@ -624,45 +652,73 @@ fn imbalances_should_work() {
 
 #[test]
 fn force_metadata_should_work() {
 	new_test_ext().execute_with(|| {
-		//force set metadata works
+		// force set metadata works
 		assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1));
-		assert_ok!(Assets::force_set_metadata(Origin::root(), 0, vec![0u8; 10], vec![0u8; 10], 8, false));
+		assert_ok!(Assets::force_set_metadata(
+			Origin::root(),
+			0,
+			vec![0u8; 10],
+			vec![0u8; 10],
+			8,
+			false
+		));
 		assert!(Metadata::<Test>::contains_key(0));
 
-		//overwrites existing metadata
+		// overwrites existing metadata
 		let asset_original_metadata = Metadata::<Test>::get(0);
-		assert_ok!(Assets::force_set_metadata(Origin::root(), 0, vec![1u8; 10], vec![1u8; 10], 8, false));
+		assert_ok!(Assets::force_set_metadata(
+			Origin::root(),
+			0,
+			vec![1u8; 10],
+			vec![1u8; 10],
+			8,
+			false
+		));
 		assert_ne!(Metadata::<Test>::get(0), asset_original_metadata);
 
-		//attempt to set metadata for non-existent asset class
+		// attempt to set metadata for non-existent asset class
 		assert_noop!(
 			Assets::force_set_metadata(Origin::root(), 1, vec![0u8; 10], vec![0u8; 10], 8, false),
 			Error::<Test>::Unknown
 		);
 
-		//string length limit check
+		// string length limit check
 		let limit = StringLimit::get() as usize;
 		assert_noop!(
-			Assets::force_set_metadata(Origin::root(), 0, vec![0u8; limit + 1], vec![0u8; 10], 8, false),
+			Assets::force_set_metadata(
+				Origin::root(),
+				0,
+				vec![0u8; limit + 1],
+				vec![0u8; 10],
+				8,
+				false
+			),
 			Error::<Test>::BadMetadata
 		);
 		assert_noop!(
-			Assets::force_set_metadata(Origin::root(), 0, vec![0u8; 10], vec![0u8; limit + 1], 8, false),
+			Assets::force_set_metadata(
+				Origin::root(),
+				0,
+				vec![0u8; 10],
+				vec![0u8; limit + 1],
+				8,
+				false
+			),
 			Error::<Test>::BadMetadata
 		);
 
-		//force clear metadata works
+		// force clear metadata works
 		assert!(Metadata::<Test>::contains_key(0));
 		assert_ok!(Assets::force_clear_metadata(Origin::root(), 0));
 		assert!(!Metadata::<Test>::contains_key(0));
 
-		//Error handles clearing non-existent asset class
+		// Error handles clearing non-existent asset class
 		assert_noop!(Assets::force_clear_metadata(Origin::root(), 1), Error::<Test>::Unknown);
 	});
 }
 
 #[test]
-fn force_asset_status_should_work(){
+fn force_asset_status_should_work() {
 	new_test_ext().execute_with(|| {
 		Balances::make_free_balance_be(&1, 10);
 		Balances::make_free_balance_be(&2, 10);
@@ -670,28 +726,28 @@ fn force_asset_status_should_work() {
 		assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 50));
 		assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 150));
 
-		//force asset status to change min_balance > balance
+		// force asset status to change min_balance > balance
 		assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 100, true, false));
 		assert_eq!(Assets::balance(0, 1), 50);
 
-		//account can recieve assets for balance < min_balance
+		// account can receive assets for balance < min_balance
 		assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 1));
 		assert_eq!(Assets::balance(0, 1), 51);
 
-		//account on outbound transfer will cleanup for balance < min_balance
+		// account on outbound transfer will clean up for balance < min_balance
 		assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 1));
-		assert_eq!(Assets::balance(0,1), 0);
+		assert_eq!(Assets::balance(0, 1), 0);
 
-		//won't create new account with balance below min_balance
+		// won't create new account with balance below min_balance
 		assert_noop!(Assets::transfer(Origin::signed(2), 0, 3, 50), TokenError::BelowMinimum);
 
-		//force asset status will not execute for non-existent class
+		// force asset status will not execute for non-existent class
 		assert_noop!(
 			Assets::force_asset_status(Origin::root(), 1, 1, 1, 1, 1, 90, true, false),
 			Error::<Test>::Unknown
 		);
 
-		//account drains to completion when funds dip below min_balance
+		// account drains to completion when funds dip below min_balance
 		assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 110, true, false));
 		assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 110));
 		assert_eq!(Assets::balance(0, 1), 200);
@@ -715,7 +771,10 @@ fn balance_conversion_should_work() {
 			Err(ConversionError::AssetMissing)
 		);
 		assert_eq!(
-			BalanceToAssetBalance::<Balances, Test, ConvertInto>::to_asset_balance(100, not_sufficient),
+			BalanceToAssetBalance::<Balances, Test, ConvertInto>::to_asset_balance(
+				100,
+				not_sufficient
+			),
 			Err(ConversionError::AssetNotSufficient)
 		);
 		// 10 / 1 == 10 -> the conversion should 10x the value
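The `balance_conversion_should_work` hunk above, together with the `BalanceToAssetBalance` impl in `types.rs` just below, scales a native-currency amount by the ratio of the asset's minimum balance to the currency's minimum balance. A standalone sketch of that arithmetic (the real impl goes through `FixedU128`; names here are hypothetical):

// Sketch of the conversion rule behind `BalanceToAssetBalance`.
fn to_asset_balance(balance: u128, asset_min_balance: u128, currency_min_balance: u128) -> u128 {
    // ratio = asset_min_balance / currency_min_balance
    balance * asset_min_balance / currency_min_balance
}

fn main() {
    // "10 / 1 == 10 -> the conversion should 10x the value"
    assert_eq!(to_asset_balance(100, 10, 1), 1000);
}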
diff --git a/substrate/frame/assets/src/types.rs b/substrate/frame/assets/src/types.rs
index 478905eb68a3be39a8e099d93007c30b54cb6a8b..810b83506e2b2c4aafa0e218e2dc01c97596a359 100644
--- a/substrate/frame/assets/src/types.rs
+++ b/substrate/frame/assets/src/types.rs
@@ -21,18 +21,13 @@ use super::*;
 
 use frame_support::pallet_prelude::*;
 use frame_support::traits::{fungible, tokens::BalanceConversion};
-use sp_runtime::{FixedPointNumber, FixedPointOperand, FixedU128};
-use sp_runtime::traits::Convert;
+use sp_runtime::{traits::Convert, FixedPointNumber, FixedPointOperand, FixedU128};
 
 pub(super) type DepositBalanceOf<T, I = ()> =
 	<<T as Config<I>>::Currency as Currency<<T as SystemConfig>::AccountId>>::Balance;
 
 #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)]
-pub struct AssetDetails<
-	Balance,
-	AccountId,
-	DepositBalance,
-> {
+pub struct AssetDetails<Balance, AccountId, DepositBalance> {
 	/// Can change `owner`, `issuer`, `freezer` and `admin` accounts.
 	pub(super) owner: AccountId,
 	/// Can mint tokens.
@@ -144,7 +139,9 @@ pub trait FrozenBalance<AssetId, AccountId, Balance> {
 }
 
 impl<AssetId, AccountId, Balance> FrozenBalance<AssetId, AccountId, Balance> for () {
-	fn frozen_balance(_: AssetId, _: &AccountId) -> Option<Balance> { None }
+	fn frozen_balance(_: AssetId, _: &AccountId) -> Option<Balance> {
+		None
+	}
 	fn died(_: AssetId, _: &AccountId) {}
 }
 
@@ -175,10 +172,7 @@ pub(super) struct DebitFlags {
 
 impl From<TransferFlags> for DebitFlags {
 	fn from(f: TransferFlags) -> Self {
-		Self {
-			keep_alive: f.keep_alive,
-			best_effort: f.best_effort,
-		}
+		Self { keep_alive: f.keep_alive, best_effort: f.best_effort }
 	}
}
 
@@ -205,7 +199,7 @@ type BalanceOf<F, T> = <F as fungible::Inspect<AccountIdOf<T>>>::Balance;
 /// minimum balance and the minimum asset balance.
 pub struct BalanceToAssetBalance<F, T, CON, I = ()>(PhantomData<(F, T, CON, I)>);
 impl<F, T, CON, I> BalanceConversion<BalanceOf<F, T>, AssetIdOf<T, I>, AssetBalanceOf<T, I>>
-for BalanceToAssetBalance<F, T, CON, I>
+	for BalanceToAssetBalance<F, T, CON, I>
 where
 	F: fungible::Inspect<AccountIdOf<T>>,
 	T: Config<I>,
diff --git a/substrate/frame/assets/src/weights.rs b/substrate/frame/assets/src/weights.rs
index ae5462288a306b820ced97771e5a24bcfa3d7aa7..6e8517064f16dd819fe36c782f75d37182dc0197 100644
--- a/substrate/frame/assets/src/weights.rs
+++ b/substrate/frame/assets/src/weights.rs
@@ -36,6 +36,7 @@
 // --template=./.maintain/frame-weight-template.hbs
 
+#![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
diff --git a/substrate/frame/atomic-swap/src/lib.rs b/substrate/frame/atomic-swap/src/lib.rs
index 829328a74e4c61c2f2d2b2948da0aebc191e8bed..1645131369792b4ab0ffc4194757fc112bd615df 100644
--- a/substrate/frame/atomic-swap/src/lib.rs
+++ b/substrate/frame/atomic-swap/src/lib.rs
@@ -42,16 +42,20 @@
 mod tests;
 
-use sp_std::{prelude::*, marker::PhantomData, ops::{Deref, DerefMut}};
-use sp_io::hashing::blake2_256;
+use codec::{Decode, Encode};
 use frame_support::{
-	RuntimeDebugNoBound,
-	traits::{Get, Currency, ReservableCurrency, BalanceStatus},
-	weights::Weight, dispatch::DispatchResult,
+	dispatch::DispatchResult,
+	traits::{BalanceStatus, Currency, Get, ReservableCurrency},
+	weights::Weight,
+	RuntimeDebugNoBound,
 };
-use codec::{Encode, Decode};
+use sp_io::hashing::blake2_256;
 use sp_runtime::RuntimeDebug;
+use sp_std::{
+	marker::PhantomData,
+	ops::{Deref, DerefMut},
+	prelude::*,
+};
 
 /// Pending atomic swap operation.
 #[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode)]
@@ -93,14 +97,20 @@ pub struct BalanceSwapAction<AccountId, C: ReservableCurrency<AccountId>> {
 	_marker: PhantomData<C>,
 }
 
-impl<AccountId, C> BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	/// Create a new swap action value of balance.
 	pub fn new(value: <C as Currency<AccountId>>::Balance) -> Self {
 		Self { value, _marker: PhantomData }
 	}
 }
 
-impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	type Target = <C as Currency<AccountId>>::Balance;
 
 	fn deref(&self) -> &Self::Target {
@@ -108,14 +118,18 @@ impl<AccountId, C> Deref for BalanceSwapAction<AccountId, C> where C: Reservable
 	}
 }
 
-impl<AccountId, C> DerefMut for BalanceSwapAction<AccountId, C> where C: ReservableCurrency<AccountId> {
+impl<AccountId, C> DerefMut for BalanceSwapAction<AccountId, C>
+where
+	C: ReservableCurrency<AccountId>,
+{
 	fn deref_mut(&mut self) -> &mut Self::Target {
 		&mut self.value
 	}
 }
 
 impl<T: Config, AccountId, C> SwapAction<AccountId, T> for BalanceSwapAction<AccountId, C>
-	where C: ReservableCurrency<AccountId>
+where
+	C: ReservableCurrency<AccountId>,
 {
 	fn reserve(&self, source: &AccountId) -> DispatchResult {
 		C::reserve(&source, self.value)
@@ -138,9 +152,9 @@ pub use pallet::*;
 
 #[frame_support::pallet]
 pub mod pallet {
+	use super::*;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
-	use super::*;
 
 	/// Atomic swap's pallet configuration trait.
 	#[pallet::config]
@@ -168,9 +182,12 @@ pub mod pallet {
 	pub struct Pallet<T>(PhantomData<T>);
 
 	#[pallet::storage]
-	pub type PendingSwaps<T: Config> = StorageDoubleMap<_,
-		Twox64Concat, T::AccountId,
-		Blake2_128Concat, HashedProof,
+	pub type PendingSwaps<T: Config> = StorageDoubleMap<
+		_,
+		Twox64Concat,
+		T::AccountId,
+		Blake2_128Concat,
+		HashedProof,
 		PendingSwap<T>,
 	>;
 
@@ -209,7 +226,7 @@ pub mod pallet {
 	}
 
 	/// Old name generated by `decl_event`.
-	#[deprecated(note="use `Event` instead")]
+	#[deprecated(note = "use `Event` instead")]
 	pub type RawEvent<T> = Event<T>;
 
 	#[pallet::call]
@@ -249,9 +266,7 @@ pub mod pallet {
 			};
 			PendingSwaps::<T>::insert(target.clone(), hashed_proof.clone(), swap.clone());
 
-			Self::deposit_event(
-				Event::NewSwap(target, hashed_proof, swap)
-			);
+			Self::deposit_event(Event::NewSwap(target, hashed_proof, swap));
 
 			Ok(())
 		}
@@ -274,25 +289,20 @@ pub mod pallet {
 			proof: Vec<u8>,
 			action: T::SwapAction,
 		) -> DispatchResult {
-			ensure!(
-				proof.len() <= T::ProofLimit::get() as usize,
-				Error::<T>::ProofTooLarge,
-			);
+			ensure!(proof.len() <= T::ProofLimit::get() as usize, Error::<T>::ProofTooLarge,);
 
 			let target = ensure_signed(origin)?;
 			let hashed_proof = blake2_256(&proof);
 
-			let swap = PendingSwaps::<T>::get(&target, hashed_proof)
-				.ok_or(Error::<T>::InvalidProof)?;
+			let swap =
+				PendingSwaps::<T>::get(&target, hashed_proof).ok_or(Error::<T>::InvalidProof)?;
 			ensure!(swap.action == action, Error::<T>::ClaimActionMismatch);
 
 			let succeeded = swap.action.claim(&swap.source, &target);
 
 			PendingSwaps::<T>::remove(target.clone(), hashed_proof.clone());
 
-			Self::deposit_event(
-				Event::SwapClaimed(target, hashed_proof, succeeded)
-			);
+			Self::deposit_event(Event::SwapClaimed(target, hashed_proof, succeeded));
 
 			Ok(())
 		}
@@ -311,12 +321,8 @@ pub mod pallet {
 		) -> DispatchResult {
 			let source = ensure_signed(origin)?;
 
-			let swap = PendingSwaps::<T>::get(&target, hashed_proof)
-				.ok_or(Error::<T>::NotExist)?;
-			ensure!(
-				swap.source == source,
-				Error::<T>::SourceMismatch,
-			);
+			let swap = PendingSwaps::<T>::get(&target, hashed_proof).ok_or(Error::<T>::NotExist)?;
+			ensure!(swap.source == source, Error::<T>::SourceMismatch,);
 			ensure!(
 				frame_system::Pallet::<T>::block_number() >= swap.end_block,
 				Error::<T>::DurationNotPassed,
@@ -325,9 +331,7 @@ pub mod pallet {
 			swap.action.cancel(&swap.source);
 			PendingSwaps::<T>::remove(&target, hashed_proof.clone());
 
-			Self::deposit_event(
-				Event::SwapCancelled(target, hashed_proof)
-			);
+			Self::deposit_event(Event::SwapCancelled(target, hashed_proof));
 
 			Ok(())
 		}
diff --git a/substrate/frame/atomic-swap/src/tests.rs b/substrate/frame/atomic-swap/src/tests.rs
index 11e74be9b4e7f83b6774348270bf1886be98c755..2165b403dd35d61474ca01a11283bb8578b36e66 100644
--- a/substrate/frame/atomic-swap/src/tests.rs
+++ b/substrate/frame/atomic-swap/src/tests.rs
@@ -84,12 +84,7 @@ const B: u64 = 2;
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	let genesis = pallet_balances::GenesisConfig::<Test> {
-		balances: vec![
-			(A, 100),
-			(B, 200),
-		],
-	};
+	let genesis = pallet_balances::GenesisConfig::<Test> { balances: vec![(A, 100), (B, 200)] };
 	genesis.assimilate_storage(&mut t).unwrap();
 	t.into()
 }
@@ -112,7 +107,8 @@ fn two_party_successful_swap() {
 			hashed_proof.clone(),
 			BalanceSwapAction::new(50),
 			1000,
-		).unwrap();
+		)
+		.unwrap();
 
 		assert_eq!(Balances::free_balance(A), 100 - 50);
 		assert_eq!(Balances::free_balance(B), 200);
@@ -126,7 +122,8 @@ fn two_party_successful_swap() {
 			hashed_proof.clone(),
 			BalanceSwapAction::new(75),
 			1000,
-		).unwrap();
+		)
+		.unwrap();
 
 		assert_eq!(Balances::free_balance(A), 100);
 		assert_eq!(Balances::free_balance(B), 200 - 75);
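The claim path in the hunks above hinges on a single invariant: the claimer must reveal a preimage whose digest equals the stored `HashedProof` key. A standalone toy model of that lookup (std's `DefaultHasher` standing in for the pallet's `blake2_256`; names hypothetical):

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

// Stand-in digest; the pallet uses `sp_io::hashing::blake2_256`.
fn digest(proof: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    proof.hash(&mut h);
    h.finish()
}

fn main() {
    let mut pending: HashMap<u64, &str> = HashMap::new(); // hashed_proof -> swap
    let proof = b"secret preimage";
    pending.insert(digest(proof), "swap between A and B");
    // claim_swap: recompute the digest from the revealed proof and take the swap.
    assert!(pending.remove(&digest(proof)).is_some());
    // A second claim with the same proof finds nothing.
    assert!(pending.remove(&digest(proof)).is_none());
}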
@@ -134,11 +131,8 @@ fn two_party_successful_swap() {
 
 	// A reveals the proof and claims the swap on chain2.
 	chain2.execute_with(|| {
-		AtomicSwap::claim_swap(
-			Origin::signed(A),
-			proof.to_vec(),
-			BalanceSwapAction::new(75),
-		).unwrap();
+		AtomicSwap::claim_swap(Origin::signed(A), proof.to_vec(), BalanceSwapAction::new(75))
+			.unwrap();
 
 		assert_eq!(Balances::free_balance(A), 100 + 75);
 		assert_eq!(Balances::free_balance(B), 200 - 75);
@@ -146,11 +140,8 @@ fn two_party_successful_swap() {
 
 	// B use the revealed proof to claim the swap on chain1.
 	chain1.execute_with(|| {
-		AtomicSwap::claim_swap(
-			Origin::signed(B),
-			proof.to_vec(),
-			BalanceSwapAction::new(50),
-		).unwrap();
+		AtomicSwap::claim_swap(Origin::signed(B), proof.to_vec(), BalanceSwapAction::new(50))
+			.unwrap();
 
 		assert_eq!(Balances::free_balance(A), 100 - 50);
 		assert_eq!(Balances::free_balance(B), 200 + 50);
diff --git a/substrate/frame/aura/src/lib.rs b/substrate/frame/aura/src/lib.rs
index 7cc9412776df7c6912fad2f30b2068014a2516e4..41fb69dfb5454da2db993f2ddf6af22efa3227f0 100644
--- a/substrate/frame/aura/src/lib.rs
+++ b/substrate/frame/aura/src/lib.rs
@@ -37,20 +37,22 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::prelude::*;
-use codec::{Encode, Decode};
+use codec::{Decode, Encode};
 use frame_support::{
-	Parameter, traits::{Get, FindAuthor, OneSessionHandler, OnTimestampSet}, ConsensusEngineId,
+	traits::{FindAuthor, Get, OnTimestampSet, OneSessionHandler},
+	ConsensusEngineId, Parameter,
 };
+use sp_consensus_aura::{AuthorityIndex, ConsensusLog, Slot, AURA_ENGINE_ID};
 use sp_runtime::{
+	generic::DigestItem,
+	traits::{IsMember, Member, SaturatedConversion, Saturating, Zero},
 	RuntimeAppPublic,
-	traits::{SaturatedConversion, Saturating, Zero, Member, IsMember}, generic::DigestItem,
 };
-use sp_consensus_aura::{AURA_ENGINE_ID, ConsensusLog, AuthorityIndex, Slot};
+use sp_std::prelude::*;
 
+pub mod migrations;
 mod mock;
 mod tests;
-pub mod migrations;
 
 pub use pallet::*;
 
@@ -63,7 +65,11 @@ pub mod pallet {
 	#[pallet::config]
 	pub trait Config: pallet_timestamp::Config + frame_system::Config {
 		/// The identifier type for an authority.
-		type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize;
+		type AuthorityId: Member
+			+ Parameter
+			+ RuntimeAppPublic
+			+ Default
+			+ MaybeSerializeDeserialize;
 	}
 
 	#[pallet::pallet]
@@ -123,10 +129,8 @@ impl<T: Config> Pallet<T> {
 	fn change_authorities(new: Vec<T::AuthorityId>) {
 		<Authorities<T>>::put(&new);
 
-		let log: DigestItem<T::Hash> = DigestItem::Consensus(
-			AURA_ENGINE_ID,
-			ConsensusLog::AuthoritiesChange(new).encode()
-		);
+		let log: DigestItem<T::Hash> =
+			DigestItem::Consensus(AURA_ENGINE_ID, ConsensusLog::AuthoritiesChange(new).encode());
 		<frame_system::Pallet<T>>::deposit_log(log.into());
 	}
 
@@ -143,7 +147,7 @@ impl<T: Config> Pallet<T> {
 		let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime());
 		for (id, mut data) in pre_runtime_digests {
 			if id == AURA_ENGINE_ID {
-				return Slot::decode(&mut data).ok();
+				return Slot::decode(&mut data).ok()
 			}
 		}
 
@@ -166,14 +170,16 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
 	type Key = T::AuthorityId;
 
 	fn on_genesis_session<'a, I: 'a>(validators: I)
-		where I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>
+	where
+		I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>,
 	{
 		let authorities = validators.map(|(_, k)| k).collect::<Vec<_>>();
 		Self::initialize_authorities(&authorities);
 	}
 
 	fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I)
-		where I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>
+	where
+		I: Iterator<Item = (&'a T::AccountId, T::AuthorityId)>,
 	{
 		// instant changes
 		if changed {
@@ -196,8 +202,9 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
 }
 
 impl<T: Config> FindAuthor<u32> for Pallet<T> {
-	fn find_author<'a, I>(digests: I) -> Option<u32> where
-		I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>
+	fn find_author<'a, I>(digests: I) -> Option<u32>
+	where
+		I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
 	{
 		for (id, mut data) in digests.into_iter() {
 			if id == AURA_ENGINE_ID {
@@ -220,7 +227,8 @@ pub struct FindAccountFromAuthorIndex<T, Inner>(sp_std::marker::PhantomData<(T, Inner)>);
 	fn find_author<'a, I>(digests: I) -> Option<T::AuthorityId>
-		where I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>
+	where
+		I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
 	{
 		let i = Inner::find_author(digests)?;
 
@@ -234,9 +242,7 @@ pub type AuraAuthorId<T> = FindAccountFromAuthorIndex<T, Pallet<T>>;
 
 impl<T: Config> IsMember<T::AuthorityId> for Pallet<T> {
 	fn is_member(authority_id: &T::AuthorityId) -> bool {
-		Self::authorities()
-			.iter()
-			.any(|id| id == authority_id)
+		Self::authorities().iter().any(|id| id == authority_id)
 	}
 }
 
@@ -248,6 +254,9 @@ impl<T: Config> OnTimestampSet<T::Moment> for Pallet<T> {
 		let timestamp_slot = moment / slot_duration;
 		let timestamp_slot = Slot::from(timestamp_slot.saturated_into::<u64>());
 
-		assert!(CurrentSlot::<T>::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`");
+		assert!(
+			CurrentSlot::<T>::get() == timestamp_slot,
+			"Timestamp slot must match `CurrentSlot`"
+		);
 	}
 }
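The `OnTimestampSet` assertion just reformatted above reduces to integer arithmetic: the slot implied by the new timestamp must equal the slot already recorded for the block. A minimal standalone model (u64 milliseconds; names hypothetical):

// Toy version of the slot check in Aura's `OnTimestampSet`.
fn slot_from_timestamp(moment_ms: u64, slot_duration_ms: u64) -> u64 {
    assert!(slot_duration_ms != 0, "Aura slot duration cannot be zero.");
    moment_ms / slot_duration_ms
}

fn main() {
    let current_slot = 2u64;
    // A timestamp of 12s with 6s slots lands in slot 2, matching `CurrentSlot`.
    assert_eq!(slot_from_timestamp(12_000, 6_000), current_slot);
}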
diff --git a/substrate/frame/aura/src/migrations.rs b/substrate/frame/aura/src/migrations.rs
index 038c5b3f3f18b6c93d00297555073cced95bb0e4..e194c17406b63a12b12dd70e05cf809935e701aa 100644
--- a/substrate/frame/aura/src/migrations.rs
+++ b/substrate/frame/aura/src/migrations.rs
@@ -17,11 +17,13 @@
 
 //! Migrations for the AURA pallet.
 
-use frame_support::{traits::Get, weights::Weight, pallet_prelude::*};
+use frame_support::{pallet_prelude::*, traits::Get, weights::Weight};
 
 struct __LastTimestamp<T>(sp_std::marker::PhantomData<T>);
 impl<T: RemoveLastTimestamp> frame_support::traits::StorageInstance for __LastTimestamp<T> {
-	fn pallet_prefix() -> &'static str { T::PalletPrefix::get() }
+	fn pallet_prefix() -> &'static str {
+		T::PalletPrefix::get()
+	}
 	const STORAGE_PREFIX: &'static str = "LastTimestamp";
 }
diff --git a/substrate/frame/aura/src/mock.rs b/substrate/frame/aura/src/mock.rs
index aff6b76a7a49f39c6b6d277c566993ff81015819..72d457165d3c0fce23f08d939b2ef3ed08da938e 100644
--- a/substrate/frame/aura/src/mock.rs
+++ b/substrate/frame/aura/src/mock.rs
@@ -20,10 +20,13 @@
 #![cfg(test)]
 
 use crate as pallet_aura;
-use sp_consensus_aura::ed25519::AuthorityId;
-use sp_runtime::{traits::IdentityLookup, testing::{Header, UintAuthorityId}};
 use frame_support::{parameter_types, traits::GenesisBuild};
+use sp_consensus_aura::ed25519::AuthorityId;
 use sp_core::H256;
+use sp_runtime::{
+	testing::{Header, UintAuthorityId},
+	traits::IdentityLookup,
+};
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -86,8 +89,10 @@ impl pallet_aura::Config for Test {
 pub fn new_test_ext(authorities: Vec<u64>) -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	pallet_aura::GenesisConfig::<Test>{
+	pallet_aura::GenesisConfig::<Test> {
 		authorities: authorities.into_iter().map(|a| UintAuthorityId(a).to_public_key()).collect(),
-	}.assimilate_storage(&mut t).unwrap();
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	t.into()
 }
diff --git a/substrate/frame/aura/src/tests.rs b/substrate/frame/aura/src/tests.rs
index 18e14e802bd3219001e5083252d93405aa311bd5..14e79ab54753c3885e660f0297c61706071b245b 100644
--- a/substrate/frame/aura/src/tests.rs
+++ b/substrate/frame/aura/src/tests.rs
@@ -19,7 +19,7 @@
 
 #![cfg(test)]
 
-use crate::mock::{Aura, new_test_ext};
+use crate::mock::{new_test_ext, Aura};
 
 #[test]
 fn initial_values() {
diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs
index 1f480926209ec2c7ec08b9c7a3c6a065c53c55b6..e30bcb629662021748a66397dfda2ba363719ccb 100644
--- a/substrate/frame/authority-discovery/src/lib.rs
+++ b/substrate/frame/authority-discovery/src/lib.rs
@@ -23,16 +23,16 @@
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::prelude::*;
 use frame_support::traits::OneSessionHandler;
 use sp_authority_discovery::AuthorityId;
+use sp_std::prelude::*;
 
 pub use pallet::*;
 
 #[frame_support::pallet]
 pub mod pallet {
-	use frame_support::pallet_prelude::*;
 	use super::*;
+	use frame_support::pallet_prelude::*;
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
@@ -45,20 +45,12 @@ pub mod pallet {
 	#[pallet::storage]
 	#[pallet::getter(fn keys)]
 	/// Keys of the current authority set.
-	pub(super) type Keys<T: Config> = StorageValue<
-		_,
-		Vec<AuthorityId>,
-		ValueQuery,
-	>;
+	pub(super) type Keys<T: Config> = StorageValue<_, Vec<AuthorityId>, ValueQuery>;
 
 	#[pallet::storage]
 	#[pallet::getter(fn next_keys)]
 	/// Keys of the next authority set.
-	pub(super) type NextKeys<T: Config> = StorageValue<
-		_,
-		Vec<AuthorityId>,
-		ValueQuery,
-	>;
+	pub(super) type NextKeys<T: Config> = StorageValue<_, Vec<AuthorityId>, ValueQuery>;
 
 	#[pallet::genesis_config]
 	pub struct GenesisConfig {
@@ -68,9 +60,7 @@ pub mod pallet {
 	#[cfg(feature = "std")]
 	impl Default for GenesisConfig {
 		fn default() -> Self {
-			Self {
-				keys: Default::default(),
-			}
+			Self { keys: Default::default() }
 		}
 	}
 	#[pallet::genesis_build]
@@ -148,18 +138,18 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
 
 #[cfg(test)]
 mod tests {
-	use crate as pallet_authority_discovery;
 	use super::*;
-	use sp_authority_discovery::AuthorityPair;
+	use crate as pallet_authority_discovery;
+	use frame_support::{parameter_types, traits::GenesisBuild};
 	use sp_application_crypto::Pair;
+	use sp_authority_discovery::AuthorityPair;
 	use sp_core::{crypto::key_types, H256};
 	use sp_io::TestExternalities;
 	use sp_runtime::{
-		testing::{Header, UintAuthorityId}, traits::{ConvertInto, IdentityLookup, OpaqueKeys},
-		Perbill, KeyTypeId,
+		testing::{Header, UintAuthorityId},
+		traits::{ConvertInto, IdentityLookup, OpaqueKeys},
+		KeyTypeId, Perbill,
 	};
-	use frame_support::parameter_types;
-	use frame_support::traits::GenesisBuild;
 
 	type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 	type Block = frame_system::mocking::MockBlock<Test>;
@@ -260,41 +250,44 @@ mod tests {
 		// everywhere.
 		let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()).unwrap().public();
 
-		let mut first_authorities: Vec<AuthorityId> = vec![0, 1].into_iter()
+		let mut first_authorities: Vec<AuthorityId> = vec![0, 1]
+			.into_iter()
 			.map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public())
 			.map(AuthorityId::from)
 			.collect();
 
-		let second_authorities: Vec<AuthorityId> = vec![2, 3].into_iter()
+		let second_authorities: Vec<AuthorityId> = vec![2, 3]
+			.into_iter()
 			.map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public())
 			.map(AuthorityId::from)
 			.collect();
 		// Needed for `pallet_session::OneSessionHandler::on_new_session`.
-		let second_authorities_and_account_ids = second_authorities.clone()
+		let second_authorities_and_account_ids = second_authorities
+			.clone()
 			.into_iter()
 			.map(|id| (&account_id, id))
-			.collect::<Vec<(&AuthorityId, AuthorityId)> >();
+			.collect::<Vec<(&AuthorityId, AuthorityId)>>();
 
-		let mut third_authorities: Vec<AuthorityId> = vec![4, 5].into_iter()
+		let mut third_authorities: Vec<AuthorityId> = vec![4, 5]
+			.into_iter()
 			.map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public())
 			.map(AuthorityId::from)
 			.collect();
 		// Needed for `pallet_session::OneSessionHandler::on_new_session`.
-		let third_authorities_and_account_ids = third_authorities.clone()
+		let third_authorities_and_account_ids = third_authorities
+			.clone()
 			.into_iter()
 			.map(|id| (&account_id, id))
-			.collect::<Vec<(&AuthorityId, AuthorityId)> >();
+			.collect::<Vec<(&AuthorityId, AuthorityId)>>();
 
 		// Build genesis.
-		let mut t = frame_system::GenesisConfig::default()
-			.build_storage::<Test>()
-			.unwrap();
-
+		let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
 		GenesisBuild::<Test>::assimilate_storage(
-			&pallet_authority_discovery::GenesisConfig{keys: vec![]},
-			&mut t
-		).unwrap();
+			&pallet_authority_discovery::GenesisConfig { keys: vec![] },
+			&mut t,
+		)
+		.unwrap();
 
 		// Create externalities.
 		let mut externalities = TestExternalities::new(t);
 
@@ -303,7 +296,7 @@ mod tests {
 			use frame_support::traits::OneSessionHandler;
 
 			AuthorityDiscovery::on_genesis_session(
-				first_authorities.iter().map(|id| (id, id.clone()))
+				first_authorities.iter().map(|id| (id, id.clone())),
 			);
 			first_authorities.sort();
 			let mut authorities_returned = AuthorityDiscovery::authorities();
@@ -318,8 +311,7 @@ mod tests {
 			);
 			let authorities_returned = AuthorityDiscovery::authorities();
 			assert_eq!(
-				first_authorities,
-				authorities_returned,
+				first_authorities, authorities_returned,
 				"Expected authority set not to change as `changed` was set to false.",
 			);
 
@@ -329,7 +321,8 @@ mod tests {
 				second_authorities_and_account_ids.into_iter(),
 				third_authorities_and_account_ids.clone().into_iter(),
 			);
-			let mut second_and_third_authorities = second_authorities.iter()
+			let mut second_and_third_authorities = second_authorities
+				.iter()
 				.chain(third_authorities.iter())
 				.cloned()
 				.collect::<Vec<AuthorityId>>();
diff --git a/substrate/frame/authorship/src/lib.rs b/substrate/frame/authorship/src/lib.rs
index ca03320306d398c774dec8dd6384d3e203e77650..73efbbe30b014095e1eda56ba09fbbf4033900e2 100644
--- a/substrate/frame/authorship/src/lib.rs
+++ b/substrate/frame/authorship/src/lib.rs
@@ -21,13 +21,14 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::{result, prelude::*, collections::btree_set::BTreeSet};
+use codec::{Decode, Encode};
 use frame_support::{
-	dispatch, traits::{FindAuthor, VerifySeal, Get},
+	dispatch,
+	traits::{FindAuthor, Get, VerifySeal},
 };
-use codec::{Encode, Decode};
+use sp_authorship::{InherentError, UnclesInherentData, INHERENT_IDENTIFIER};
 use sp_runtime::traits::{Header as HeaderT, One, Saturating};
-use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError};
+use sp_std::{collections::btree_set::BTreeSet, prelude::*, result};
 
 const MAX_UNCLES: usize = 10;
 
@@ -56,15 +57,15 @@ pub trait FilterUncle<Header, Author> {
 
 	/// Do additional filtering on a seal-checked uncle block, with the accumulated
 	/// filter.
-	fn filter_uncle(header: &Header, acc: &mut Self::Accumulator)
-		-> Result<Option<Author>, &'static str>;
+	fn filter_uncle(
+		header: &Header,
+		acc: &mut Self::Accumulator,
+	) -> Result<Option<Author>, &'static str>;
 }
 
 impl<H, A> FilterUncle<H, A> for () {
 	type Accumulator = ();
-	fn filter_uncle(_: &H, _acc: &mut Self::Accumulator)
-		-> Result<Option<A>, &'static str>
-	{
+	fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) -> Result<Option<A>, &'static str> {
 		Ok(None)
 	}
 }
@@ -74,14 +75,10 @@ impl<H, A> FilterUncle<H, A> for () {
 /// equivocating is high.
 pub struct SealVerify<T>(sp_std::marker::PhantomData<T>);
 
-impl<Header, Author, T: VerifySeal<Header, Author>> FilterUncle<Header, Author>
-	for SealVerify<T>
-{
+impl<Header, Author, T: VerifySeal<Header, Author>> FilterUncle<Header, Author> for SealVerify<T> {
 	type Accumulator = ();
 
-	fn filter_uncle(header: &Header, _acc: &mut ())
-		-> Result<Option<Author>, &'static str>
-	{
+	fn filter_uncle(header: &Header, _acc: &mut ()) -> Result<Option<Author>, &'static str> {
 		T::verify_seal(header)
 	}
 }
@@ -92,8 +89,7 @@ impl<Header, Author, T: VerifySeal<Header, Author>> FilterUncle<Header, Author>
 
 /// This does O(n log n) work in the number of uncles included.
 pub struct OnePerAuthorPerHeight<T, N>(sp_std::marker::PhantomData<(T, N)>);
 
-impl<Header, Author, T> FilterUncle<Header, Author>
-	for OnePerAuthorPerHeight<T, Header::Number>
+impl<Header, Author, T> FilterUncle<Header, Author> for OnePerAuthorPerHeight<T, Header::Number>
 where
 	Header: HeaderT + PartialEq,
 	Header::Number: Ord,
@@ -102,15 +98,16 @@ where
 {
 	type Accumulator = BTreeSet<(Header::Number, Author)>;
 
-	fn filter_uncle(header: &Header, acc: &mut Self::Accumulator)
-		-> Result<Option<Author>, &'static str>
-	{
+	fn filter_uncle(
+		header: &Header,
+		acc: &mut Self::Accumulator,
+	) -> Result<Option<Author>, &'static str> {
 		let author = T::verify_seal(header)?;
 		let number = header.number();
 
 		if let Some(ref author) = author {
 			if !acc.insert((number.clone(), author.clone())) {
-				return Err("more than one uncle per number per author included");
+				return Err("more than one uncle per number per author included")
 			}
 		}
 
@@ -126,9 +123,9 @@ enum UncleEntryItem<BlockNumber, Hash, Author> {
 }
 #[frame_support::pallet]
 pub mod pallet {
+	use super::*;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
-	use super::*;
 
 	#[pallet::config]
 	pub trait Config: frame_system::Config {
@@ -161,10 +158,8 @@ pub mod pallet {
 	#[pallet::generate_store(pub(super) trait Store)]
 	pub struct Pallet<T>(_);
 
-
 	#[pallet::hooks]
 	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
-
 		fn on_initialize(now: T::BlockNumber) -> Weight {
 			let uncle_generations = T::UncleGenerations::get();
 			// prune uncles that are older than the allowed number of generations.
@@ -189,11 +184,8 @@ pub mod pallet {
 
 	#[pallet::storage]
 	/// Uncles
-	pub(super) type Uncles<T: Config> = StorageValue<
-		_,
-		Vec<UncleEntryItem<T::BlockNumber, T::Hash, T::AccountId>>,
-		ValueQuery,
-	>;
+	pub(super) type Uncles<T: Config> =
+		StorageValue<_, Vec<UncleEntryItem<T::BlockNumber, T::Hash, T::AccountId>>, ValueQuery>;
 
 	#[pallet::storage]
 	/// Author of current block.
@@ -203,7 +195,6 @@ pub mod pallet {
 	/// Whether uncles were already set in this block.
 	pub(super) type DidSetUncles<T: Config> = StorageValue<_, bool, ValueQuery>;
 
-
 	#[pallet::error]
 	pub enum Error<T> {
 		/// The uncle parent not in the chain.
@@ -251,14 +242,16 @@ pub mod pallet {
 			if !uncles.is_empty() {
 				let prev_uncles = <Uncles<T>>::get();
-				let mut existing_hashes: Vec<_> = prev_uncles.into_iter().filter_map(|entry|
-					match entry {
+				let mut existing_hashes: Vec<_> = prev_uncles
+					.into_iter()
+					.filter_map(|entry| match entry {
 						UncleEntryItem::InclusionHeight(_) => None,
 						UncleEntryItem::Uncle(h, _) => Some(h),
-					}
-				).collect();
+					})
+					.collect();
 
-				let mut acc: <T::FilterUncle as FilterUncle<T::Header, T::AccountId>>::Accumulator = Default::default();
+				let mut acc: <T::FilterUncle as FilterUncle<T::Header, T::AccountId>>::Accumulator =
+					Default::default();
 
 				for uncle in uncles {
 					match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) {
@@ -270,10 +263,10 @@ pub mod pallet {
 							if set_uncles.len() == MAX_UNCLES {
 								break
 							}
-						}
+						},
 						Err(_) => {
 							// skip this uncle
-						}
+						},
 					}
 				}
 			}
@@ -285,14 +278,14 @@ pub mod pallet {
 			}
 		}
 
-		fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> {
+		fn check_inherent(
+			call: &Self::Call,
+			_data: &InherentData,
+		) -> result::Result<(), Self::Error> {
 			match call {
-				Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => {
-					Err(InherentError::Uncles(Error::<T>::TooManyUncles.as_str().into()))
-				},
-				_ => {
-					Ok(())
-				},
+				Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES =>
+					Err(InherentError::Uncles(Error::<T>::TooManyUncles.as_str().into())),
+				_ => Ok(()),
 			}
 		}
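The `OnePerAuthorPerHeight` filter above leans on one property of `BTreeSet`: `insert` returns `false` when the `(height, author)` pair is already present. A standalone toy version of that dedup logic:

use std::collections::BTreeSet;

// Toy version of `OnePerAuthorPerHeight::filter_uncle` (u64 heights, u32 authors).
fn filter_uncle(acc: &mut BTreeSet<(u64, u32)>, number: u64, author: u32) -> Result<(), &'static str> {
    if !acc.insert((number, author)) {
        return Err("more than one uncle per number per author included")
    }
    Ok(())
}

fn main() {
    let mut acc = BTreeSet::new();
    assert!(filter_uncle(&mut acc, 1, 42).is_ok());
    assert!(filter_uncle(&mut acc, 1, 43).is_ok()); // same height, new author: fine
    assert!(filter_uncle(&mut acc, 1, 42).is_err()); // repeated pair: rejected
}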
@@ -310,7 +303,7 @@ impl<T: Config> Pallet<T> {
 	pub fn author() -> T::AccountId {
 		// Check the memoized storage value.
 		if let Some(author) = <Author<T>>::get() {
-			return author;
+			return author
 		}
 
 		let digest = <frame_system::Pallet<T>>::digest();
@@ -332,11 +325,10 @@ impl<T: Config> Pallet<T> {
 		for uncle in new_uncles {
-			let prev_uncles = uncles.iter().filter_map(|entry|
-				match entry {
-					UncleEntryItem::InclusionHeight(_) => None,
-					UncleEntryItem::Uncle(h, _) => Some(h),
-				});
+			let prev_uncles = uncles.iter().filter_map(|entry| match entry {
+				UncleEntryItem::InclusionHeight(_) => None,
+				UncleEntryItem::Uncle(h, _) => Some(h),
+			});
 			let author = Self::verify_uncle(&uncle, prev_uncles, &mut acc)?;
 			let hash = uncle.hash();
 
@@ -351,7 +343,7 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
-	fn verify_uncle<'a, I: IntoIterator<Item=&'a T::Hash>>(
+	fn verify_uncle<'a, I: IntoIterator<Item = &'a T::Hash>>(
 		uncle: &T::Header,
 		existing_uncles: I,
 		accumulator: &mut <T::FilterUncle as FilterUncle<T::Header, T::AccountId>>::Accumulator,
@@ -368,23 +360,23 @@ impl<T: Config> Pallet<T> {
 		let hash = uncle.hash();
 
 		if uncle.number() < &One::one() {
-			return Err(Error::<T>::GenesisUncle.into());
+			return Err(Error::<T>::GenesisUncle.into())
 		}
 
 		if uncle.number() > &maximum_height {
-			return Err(Error::<T>::TooHighUncle.into());
+			return Err(Error::<T>::TooHighUncle.into())
 		}
 
 		{
 			let parent_number = uncle.number().clone() - One::one();
 			let parent_hash = <frame_system::Pallet<T>>::block_hash(&parent_number);
 			if &parent_hash != uncle.parent_hash() {
-				return Err(Error::<T>::InvalidUncleParent.into());
+				return Err(Error::<T>::InvalidUncleParent.into())
 			}
 		}
 
 		if uncle.number() < &minimum_height {
-			return Err(Error::<T>::OldUncle.into());
+			return Err(Error::<T>::OldUncle.into())
 		}
 
 		let duplicate = existing_uncles.into_iter().any(|h| *h == hash);
@@ -412,13 +404,15 @@ impl<T: Config> Pallet<T> {
 
 #[cfg(test)]
 mod tests {
-	use crate as pallet_authorship;
 	use super::*;
+	use crate as pallet_authorship;
+	use frame_support::{parameter_types, ConsensusEngineId};
 	use sp_core::H256;
 	use sp_runtime::{
-		traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem,
+		generic::DigestItem,
+		testing::Header,
+		traits::{BlakeTwo256, IdentityLookup},
 	};
-	use frame_support::{parameter_types, ConsensusEngineId};
 
 	type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 	type Block = frame_system::mocking::MockBlock<Test>;
@@ -483,11 +477,12 @@ mod tests {
 
 	impl FindAuthor<u64> for AuthorGiven {
 		fn find_author<'a, I>(digests: I) -> Option<u64>
-			where I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>
+		where
+			I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
 		{
 			for (id, data) in digests {
 				if id == TEST_ID {
-					return u64::decode(&mut &data[..]).ok();
+					return u64::decode(&mut &data[..]).ok()
 				}
 			}
 
@@ -502,7 +497,8 @@ mod tests {
 			let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime());
 			let seals = header.digest.logs.iter().filter_map(|d| d.as_seal());
 
-			let author = AuthorGiven::find_author(pre_runtime_digests).ok_or_else(|| "no author")?;
+			let author =
+				AuthorGiven::find_author(pre_runtime_digests).ok_or_else(|| "no author")?;
 
 			for (id, seal) in seals {
 				if id == TEST_ID {
@@ -510,10 +506,10 @@ mod tests {
 						Err(_) => return Err("wrong seal"),
 						Ok(a) => {
 							if a != author {
-								return Err("wrong author in seal");
+								return Err("wrong author in seal")
 							}
 							break
-						}
+						},
 					}
 				}
 			}
@@ -533,13 +529,7 @@ mod tests {
 	}
 
 	fn create_header(number: u64, parent_hash: H256, state_root: H256) -> Header {
-		Header::new(
-			number,
-			Default::default(),
-			state_root,
-			parent_hash,
-			Default::default(),
-		)
+		Header::new(number, Default::default(), state_root, parent_hash, Default::default())
 	}
 
 	fn new_test_ext() -> sp_io::TestExternalities {
@@ -554,9 +544,14 @@ mod tests {
 		let hash = Default::default();
 		let author = Default::default();
 		let uncles = vec![
-			InclusionHeight(1u64), Uncle(hash, Some(author)), Uncle(hash, None), Uncle(hash, None),
-			InclusionHeight(2u64), Uncle(hash, None),
-			InclusionHeight(3u64), Uncle(hash, None),
+			InclusionHeight(1u64),
+			Uncle(hash, Some(author)),
+			Uncle(hash, None),
+			Uncle(hash, None),
+			InclusionHeight(2u64),
+			Uncle(hash, None),
+			InclusionHeight(3u64),
+			Uncle(hash, None),
 		];
 
 		<Authorship as Store>::Uncles::put(uncles);
@@ -595,15 +590,15 @@ mod tests {
 		}
 
 		let mut canon_chain = CanonChain {
-			inner: vec![seal_header(create_header(0, Default::default(), Default::default()), 999)],
+			inner: vec![seal_header(
+				create_header(0, Default::default(), Default::default()),
+				999,
+			)],
 		};
 
-		let initialize_block = |number, hash: H256| System::initialize(
-			&number,
-			&hash,
-			&Default::default(),
-			Default::default()
-		);
+		let initialize_block = |number, hash: H256| {
+			System::initialize(&number, &hash, &Default::default(), Default::default())
+		};
 
 		for number in 1..8 {
 			initialize_block(number, canon_chain.best_hash());
@@ -691,18 +686,11 @@ mod tests {
 	fn sets_author_lazily() {
 		new_test_ext().execute_with(|| {
 			let author = 42;
-			let mut header = seal_header(
-				create_header(1, Default::default(), [1; 32].into()),
-				author,
-			);
+			let mut header =
+				seal_header(create_header(1, Default::default(), [1; 32].into()), author);
 
 			header.digest_mut().pop(); // pop the seal off.
-			System::initialize(
-				&1,
-				&Default::default(),
-				header.digest(),
-				Default::default(),
-			);
+			System::initialize(&1, &Default::default(), header.digest(), Default::default());
 
 			assert_eq!(Authorship::author(), author);
 		});
@@ -716,27 +704,15 @@ mod tests {
 			let author_b = 43;
 
 			let mut acc: <Filter as FilterUncle<Header, u64>>::Accumulator = Default::default();
-			let header_a1 = seal_header(
-				create_header(1, Default::default(), [1; 32].into()),
-				author_a,
-			);
-			let header_b1 = seal_header(
-				create_header(1, Default::default(), [1; 32].into()),
-				author_b,
-			);
-
-			let header_a2_1 = seal_header(
-				create_header(2, Default::default(), [1; 32].into()),
-				author_a,
-			);
-			let header_a2_2 = seal_header(
-				create_header(2, Default::default(), [2; 32].into()),
-				author_a,
-			);
-
-			let mut check_filter = move |uncle| {
-				Filter::filter_uncle(uncle, &mut acc)
-			};
+			let header_a1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_a);
+			let header_b1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_b);
+
+			let header_a2_1 =
+				seal_header(create_header(2, Default::default(), [1; 32].into()), author_a);
+			let header_a2_2 =
+				seal_header(create_header(2, Default::default(), [2; 32].into()), author_a);
+
+			let mut check_filter = move |uncle| Filter::filter_uncle(uncle, &mut acc);
 
 			// same height, different author is OK.
assert_eq!(check_filter(&header_a1), Ok(Some(author_a))); diff --git a/substrate/frame/babe/src/benchmarking.rs b/substrate/frame/babe/src/benchmarking.rs index 145a82c4f8049e857a4ad8d3dae57c2dc55d755e..b8a85daf6e666bc0593d1914ee97b16b46bec249 100644 --- a/substrate/frame/babe/src/benchmarking.rs +++ b/substrate/frame/babe/src/benchmarking.rs @@ -95,10 +95,7 @@ mod tests { ); println!("equivocation_proof: {:?}", equivocation_proof); - println!( - "equivocation_proof.encode(): {:?}", - equivocation_proof.encode() - ); + println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode()); }); } } diff --git a/substrate/frame/babe/src/default_weights.rs b/substrate/frame/babe/src/default_weights.rs index f16f589a77cd40f7bf6d467a3f9d2409d54825f9..20ac9b961fc8d83e0fc3482f8b9d50e0b9324b5b 100644 --- a/substrate/frame/babe/src/default_weights.rs +++ b/substrate/frame/babe/src/default_weights.rs @@ -19,7 +19,8 @@ //! This file was not auto-generated. use frame_support::weights::{ - Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, + constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, + Weight, }; impl crate::WeightInfo for () { diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index e9017205c6b5884a7d7eeef4c2239ec9595df111..95abd87787b4a30f45e1e0b6af6ff23e15a8c7b9 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! //! An opt-in utility module for reporting equivocations. //! //! This module defines an offence type for BABE equivocations @@ -33,22 +32,23 @@ //! When using this module for enabling equivocation reporting it is required //! that the `ValidateUnsigned` for the BABE pallet is used in the runtime //! definition. -//! use frame_support::traits::{Get, KeyOwnerProofSystem}; use sp_consensus_babe::{EquivocationProof, Slot}; -use sp_runtime::transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, - TransactionValidityError, ValidTransaction, +use sp_runtime::{ + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + DispatchResult, Perbill, }; -use sp_runtime::{DispatchResult, Perbill}; use sp_staking::{ offence::{Kind, Offence, OffenceError, ReportOffence}, SessionIndex, }; use sp_std::prelude::*; -use crate::{Call, Pallet, Config}; +use crate::{Call, Config, Pallet}; /// A trait with utility methods for handling equivocation reports in BABE. 
/// The trait provides methods for reporting an offence triggered by a valid @@ -115,9 +115,7 @@ pub struct EquivocationHandler { impl Default for EquivocationHandler { fn default() -> Self { - Self { - _phantom: Default::default(), - } + Self { _phantom: Default::default() } } } @@ -188,30 +186,28 @@ impl Pallet { if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call { // discard equivocation report not coming from the local node match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, _ => { log::warn!( target: "runtime::babe", "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); - return InvalidTransaction::Call.into(); - } + return InvalidTransaction::Call.into() + }, } // check report staleness is_known_offence::(equivocation_proof, key_owner_proof)?; - let longevity = >::ReportLongevity::get(); + let longevity = + >::ReportLongevity::get(); ValidTransaction::with_tag_prefix("BabeEquivocation") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::max_value()) // Only one equivocation report for the same offender at the same slot. - .and_provides(( - equivocation_proof.offender.clone(), - *equivocation_proof.slot, - )) + .and_provides((equivocation_proof.offender.clone(), *equivocation_proof.slot)) .longevity(longevity) // We don't propagate this. This can never be included on a remote node. .propagate(false) @@ -235,10 +231,7 @@ fn is_known_offence( key_owner_proof: &T::KeyOwnerProof, ) -> Result<(), TransactionValidityError> { // check the membership proof to extract the offender's id - let key = ( - sp_consensus_babe::KEY_TYPE, - equivocation_proof.offender.clone(), - ); + let key = (sp_consensus_babe::KEY_TYPE, equivocation_proof.offender.clone()); let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone()) .ok_or(InvalidTransaction::BadProof)?; diff --git a/substrate/frame/babe/src/lib.rs b/substrate/frame/babe/src/lib.rs index b52868d1d023ff7a2491b95869ec4d0be2891aaf..949f55720bbd2c8d05a1d57ed20b7bf8cfdc327d 100644 --- a/substrate/frame/babe/src/lib.rs +++ b/substrate/frame/babe/src/lib.rs @@ -24,7 +24,7 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::DispatchResultWithPostInfo, - traits::{FindAuthor, Get, KeyOwnerProofSystem, OneSessionHandler, OnTimestampSet}, + traits::{FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler}, weights::{Pays, Weight}, }; use sp_application_crypto::Public; @@ -38,8 +38,8 @@ use sp_std::prelude::*; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, - BabeAuthorityWeight, BabeEpochConfiguration, ConsensusLog, Epoch, - EquivocationProof, Slot, BABE_ENGINE_ID, + BabeAuthorityWeight, BabeEpochConfiguration, ConsensusLog, Epoch, EquivocationProof, Slot, + BABE_ENGINE_ID, }; use sp_consensus_vrf::schnorrkel; @@ -80,7 +80,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. 
} /// A type signifying to BABE that it should perform epoch changes /// @@ -104,9 +104,9 @@ type MaybeRandomness = Option; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; /// The BABE Pallet #[pallet::pallet] @@ -222,11 +222,8 @@ pub mod pallet { /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = StorageValue< - _, - Vec<(AuthorityId, BabeAuthorityWeight)>, - ValueQuery, - >; + pub(super) type NextAuthorities = + StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; /// Randomness under construction. /// @@ -242,13 +239,8 @@ pub mod pallet { /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. #[pallet::storage] - pub(super) type UnderConstruction = StorageMap< - _, - Twox64Concat, - u32, - Vec, - ValueQuery, - >; + pub(super) type UnderConstruction = + StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for the current block. @@ -270,11 +262,8 @@ pub mod pallet { /// entropy was fixed (i.e. it was known to chain observers). Since epochs are defined in /// slots, which may be skipped, the block numbers may not line up with the slot numbers. #[pallet::storage] - pub(super) type EpochStart = StorageValue< - _, - (T::BlockNumber, T::BlockNumber), - ValueQuery, - >; + pub(super) type EpochStart = + StorageValue<_, (T::BlockNumber, T::BlockNumber), ValueQuery>; /// How late the current block is compared to its parent. /// @@ -303,10 +292,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - GenesisConfig { - authorities: Default::default(), - epoch_config: Default::default(), - } + GenesisConfig { authorities: Default::default(), epoch_config: Default::default() } } } @@ -315,7 +301,9 @@ pub mod pallet { fn build(&self) { SegmentIndex::::put(0); Pallet::::initialize_authorities(&self.authorities); - EpochConfig::::put(self.epoch_config.clone().expect("epoch_config must not be None")); + EpochConfig::::put( + self.epoch_config.clone().expect("epoch_config must not be None"), + ); } } @@ -359,11 +347,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; - Self::do_report_equivocation( - Some(reporter), - equivocation_proof, - key_owner_proof, - ) + Self::do_report_equivocation(Some(reporter), equivocation_proof, key_owner_proof) } /// Report authority equivocation/misbehavior. This method will verify @@ -423,8 +407,9 @@ pub mod pallet { pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; impl FindAuthor for Pallet { - fn find_author<'a, I>(digests: I) -> Option where - I: 'a + IntoIterator + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { @@ -433,15 +418,13 @@ impl FindAuthor for Pallet { } } - return None; + return None } } impl IsMember for Pallet { fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities() - .iter() - .any(|id| &id.0 == authority_id) + >::authorities().iter().any(|id| &id.0 == authority_id) } } @@ -490,7 +473,6 @@ impl Pallet { /// In other words, this is only accurate if no slots are missed. Given missed slots, the slot /// number will grow while the block number will not. Hence, the result can be interpreted as an /// upper bound.
- // // ## IMPORTANT NOTE // // This implementation is linked to how [`should_epoch_change`] is working. This might need to @@ -500,13 +482,11 @@ impl Pallet { // update this function, you must also update the corresponding weight. pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); - next_slot - .checked_sub(*CurrentSlot::::get()) - .map(|slots_remaining| { - // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. - let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); - now.saturating_add(blocks_remaining) - }) + next_slot.checked_sub(*CurrentSlot::::get()).map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }) } /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, @@ -553,10 +533,8 @@ impl Pallet { // so that nodes can track changes. let next_randomness = NextRandomness::::get(); - let next_epoch = NextEpochDescriptor { - authorities: next_authorities, - randomness: next_randomness, - }; + let next_epoch = + NextEpochDescriptor { authorities: next_authorities, randomness: next_randomness }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); if let Some(next_config) = NextEpochConfig::::get() { @@ -587,7 +565,8 @@ impl Pallet { duration: T::EpochDuration::get(), authorities: Self::authorities(), randomness: Self::randomness(), - config: EpochConfig::::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), + config: EpochConfig::::get() + .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), } } @@ -606,7 +585,9 @@ impl Pallet { authorities: NextAuthorities::::get(), randomness: NextRandomness::::get(), config: NextEpochConfig::::get().unwrap_or_else(|| { - EpochConfig::::get().expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed") + EpochConfig::::get().expect( + "EpochConfig is initialized in genesis; we never `take` or `kill` it; qed", + ) }), } } @@ -617,9 +598,7 @@ impl Pallet { const PROOF: &str = "slot number is u64; it should relate in some way to wall clock time; \ if u64 is not enough we should crash for safety; qed."; - let epoch_start = epoch_index - .checked_mul(T::EpochDuration::get()) - .expect(PROOF); + let epoch_start = epoch_index.checked_mul(T::EpochDuration::get()).expect(PROOF); epoch_start.checked_add(*GenesisSlot::::get()).expect(PROOF).into() } @@ -649,19 +628,22 @@ impl Pallet { // => let's ensure that we only modify the storage once per block let initialized = Self::initialized().is_some(); if initialized { - return; + return } - let maybe_pre_digest: Option = >::digest() - .logs - .iter() - .filter_map(|s| s.as_pre_runtime()) - .filter_map(|(id, mut data)| if id == BABE_ENGINE_ID { - PreDigest::decode(&mut data).ok() - } else { - None - }) - .next(); + let maybe_pre_digest: Option = + >::digest() + .logs + .iter() + .filter_map(|s| s.as_pre_runtime()) + .filter_map(|(id, mut data)| { + if id == BABE_ENGINE_ID { + PreDigest::decode(&mut data).ok() + } else { + None + } + }) + .next(); let is_primary = matches!(maybe_pre_digest, Some(PreDigest::Primary(..))); @@ -697,31 +679,22 @@ impl Pallet { let authority_index = digest.authority_index(); // Extract out the VRF output if 
we have it - digest - .vrf_output() - .and_then(|vrf_output| { - // Reconstruct the bytes of VRFInOut using the authority id. - Authorities::::get() - .get(authority_index as usize) - .and_then(|author| { - schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok() - }) - .and_then(|pubkey| { - let transcript = sp_consensus_babe::make_transcript( - &Self::randomness(), - current_slot, - EpochIndex::::get(), - ); - - vrf_output.0.attach_input_hash( - &pubkey, - transcript - ).ok() - }) - .map(|inout| { - inout.make_bytes(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT) - }) - }) + digest.vrf_output().and_then(|vrf_output| { + // Reconstruct the bytes of VRFInOut using the authority id. + Authorities::::get() + .get(authority_index as usize) + .and_then(|author| schnorrkel::PublicKey::from_bytes(author.0.as_slice()).ok()) + .and_then(|pubkey| { + let transcript = sp_consensus_babe::make_transcript( + &Self::randomness(), + current_slot, + EpochIndex::::get(), + ); + + vrf_output.0.attach_input_hash(&pubkey, transcript).ok() + }) + .map(|inout| inout.make_bytes(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT)) + }) }); // For primary VRF output we place it in the `Initialized` storage @@ -774,7 +747,7 @@ impl Pallet { // validate the equivocation proof if !sp_consensus_babe::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } let validator_set_count = key_owner_proof.validator_count(); @@ -786,7 +759,7 @@ impl Pallet { // check that the slot number is consistent with the session index // in the key ownership proof (i.e. slot is for that epoch) if epoch_index != session_index { - return Err(Error::::InvalidKeyOwnershipProof.into()); + return Err(Error::::InvalidKeyOwnershipProof.into()) } // check the membership proof and extract the offender's id @@ -794,12 +767,8 @@ impl Pallet { let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof) .ok_or(Error::::InvalidKeyOwnershipProof)?; - let offence = BabeEquivocationOffence { - slot, - validator_set_count, - offender, - session_index, - }; + let offence = + BabeEquivocationOffence { slot, validator_set_count, offender, session_index }; let reporters = match reporter { Some(id) => vec![id], @@ -837,7 +806,10 @@ impl OnTimestampSet for Pallet { let timestamp_slot = moment / slot_duration; let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); - assert!(CurrentSlot::::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`"); + assert!( + CurrentSlot::::get() == timestamp_slot, + "Timestamp slot must match `CurrentSlot`" + ); } } @@ -850,10 +822,7 @@ impl frame_support::traits::EstimateNextSessionRotation::get().saturating_sub(Self::current_epoch_start()) + 1; ( - Some(Permill::from_rational( - *elapsed, - T::EpochDuration::get(), - )), + Some(Permill::from_rational(*elapsed, T::EpochDuration::get())), // Read: Current Slot, Epoch Index, Genesis Slot T::DbWeight::get().reads(3), ) @@ -882,22 +851,20 @@ impl OneSessionHandler for Pallet { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); Self::initialize_authorities(&authorities); } fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) - where I: Iterator + where + I: Iterator, { - let authorities = validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); + let authorities = validators.map(|(_account, k)| 
(k, 1)).collect::>(); - let next_authorities = queued_validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); + let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); Self::enact_epoch_change(authorities, next_authorities) } @@ -914,7 +881,7 @@ impl OneSessionHandler for Pallet { fn compute_randomness( last_epoch_randomness: schnorrkel::Randomness, epoch_index: u64, - rho: impl Iterator, + rho: impl Iterator, rho_size_hint: Option, ) -> schnorrkel::Randomness { let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); @@ -930,7 +897,7 @@ fn compute_randomness( pub mod migrations { use super::*; - use frame_support::pallet_prelude::{ValueQuery, StorageValue}; + use frame_support::pallet_prelude::{StorageValue, ValueQuery}; /// Something that can return the storage prefix of the `Babe` pallet. pub trait BabePalletPrefix: Config { @@ -939,13 +906,14 @@ pub mod migrations { struct __OldNextEpochConfig(sp_std::marker::PhantomData); impl frame_support::traits::StorageInstance for __OldNextEpochConfig { - fn pallet_prefix() -> &'static str { T::pallet_prefix() } + fn pallet_prefix() -> &'static str { + T::pallet_prefix() + } const STORAGE_PREFIX: &'static str = "NextEpochConfig"; } - type OldNextEpochConfig = StorageValue< - __OldNextEpochConfig, Option, ValueQuery - >; + type OldNextEpochConfig = + StorageValue<__OldNextEpochConfig, Option, ValueQuery>; /// A storage migration that adds the current epoch configuration for Babe /// to storage. diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index ea54e9f7cea832aa108aec7b47250d270c9f6971..795d51e5876f82da712af93d86847bf4b5494681 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -17,27 +17,31 @@ //! 
Test utilities -use codec::Encode; use crate::{self as pallet_babe, Config, CurrentSlot}; -use sp_runtime::{ - Perbill, impl_opaque_keys, - curve::PiecewiseLinear, - testing::{Digest, DigestItem, Header, TestXt,}, - traits::{Header as _, IdentityLookup, OpaqueKeys}, -}; -use frame_system::InitKind; +use codec::Encode; +use frame_election_provider_support::onchain; use frame_support::{ parameter_types, - traits::{KeyOwnerProofSystem, OnInitialize, GenesisBuild}, + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, }; -use sp_io; -use sp_core::{H256, U256, crypto::{IsWrappedBy, KeyTypeId, Pair}}; +use frame_system::InitKind; +use pallet_session::historical as pallet_session_historical; +use pallet_staking::EraIndex; use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::{ + crypto::{IsWrappedBy, KeyTypeId, Pair}, + H256, U256, +}; +use sp_io; +use sp_runtime::{ + curve::PiecewiseLinear, + impl_opaque_keys, + testing::{Digest, DigestItem, Header, TestXt}, + traits::{Header as _, IdentityLookup, OpaqueKeys}, + Perbill, +}; use sp_staking::SessionIndex; -use pallet_staking::EraIndex; -use frame_election_provider_support::onchain; -use pallet_session::historical as pallet_session_historical; type DummyValidatorId = u64; @@ -277,7 +281,7 @@ pub fn go_to_block(n: u64, s: u64) { /// Slots will grow according to blocks pub fn progress_to_block(n: u64) { let mut slot = u64::from(Babe::current_slot()) + 1; - for i in System::block_number() + 1 ..= n { + for i in System::block_number() + 1..=n { go_to_block(i, slot); slot += 1; } @@ -308,7 +312,7 @@ pub fn make_primary_pre_digest( slot, vrf_output, vrf_proof, - } + }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } @@ -319,10 +323,7 @@ pub fn make_secondary_plain_pre_digest( slot: sp_consensus_babe::Slot, ) -> Digest { let digest_data = sp_consensus_babe::digests::PreDigest::SecondaryPlain( - sp_consensus_babe::digests::SecondaryPlainPreDigest { - authority_index, - slot, - } + sp_consensus_babe::digests::SecondaryPlainPreDigest { authority_index, slot }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } @@ -340,7 +341,7 @@ pub fn make_secondary_vrf_pre_digest( slot, vrf_output, vrf_proof, - } + }, ); let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); Digest { logs: vec![log] } @@ -348,13 +349,13 @@ pub fn make_secondary_vrf_pre_digest( pub fn make_vrf_output( slot: Slot, - pair: &sp_consensus_babe::AuthorityPair + pair: &sp_consensus_babe::AuthorityPair, ) -> (VRFOutput, VRFProof, [u8; 32]) { let pair = sp_core::sr25519::Pair::from_ref(pair).as_ref(); let transcript = sp_consensus_babe::make_transcript(&Babe::randomness(), slot, 0); let vrf_inout = pair.vrf_sign(transcript); - let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = vrf_inout.0 - .make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); + let vrf_randomness: sp_consensus_vrf::schnorrkel::Randomness = + vrf_inout.0.make_bytes::<[u8; 32]>(&sp_consensus_babe::BABE_VRF_INOUT_CONTEXT); let vrf_output = VRFOutput(vrf_inout.0.to_output()); let vrf_proof = VRFProof(vrf_inout.1); @@ -365,10 +366,12 @@ pub fn new_test_ext(authorities_len: usize) -> sp_io::TestExternalities { new_test_ext_with_pairs(authorities_len).1 } -pub fn new_test_ext_with_pairs(authorities_len: usize) -> (Vec, sp_io::TestExternalities) { -
let pairs = (0..authorities_len).map(|i| { - AuthorityPair::from_seed(&U256::from(i).into()) - }).collect::>(); +pub fn new_test_ext_with_pairs( + authorities_len: usize, +) -> (Vec, sp_io::TestExternalities) { + let pairs = (0..authorities_len) + .map(|i| AuthorityPair::from_seed(&U256::from(i).into())) + .collect::>(); let public = pairs.iter().map(|p| p.public()).collect(); @@ -376,13 +379,9 @@ pub fn new_test_ext_with_pairs(authorities_len: usize) -> (Vec, s } pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); + let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); pallet_balances::GenesisConfig:: { balances } .assimilate_storage(&mut t) @@ -393,13 +392,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes .iter() .enumerate() .map(|(i, k)| { - ( - i as u64, - i as u64, - MockSessionKeys { - babe_authority: AuthorityId::from(k.clone()), - }, - ) + (i as u64, i as u64, MockSessionKeys { babe_authority: AuthorityId::from(k.clone()) }) }) .collect(); @@ -412,12 +405,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes // controllers are the index + 1000 let stakers: Vec<_> = (0..authorities.len()) .map(|i| { - ( - i as u64, - i as u64 + 1000, - 10_000, - pallet_staking::StakerStatus::::Validator, - ) + (i as u64, i as u64 + 1000, 10_000, pallet_staking::StakerStatus::::Validator) }) .collect(); diff --git a/substrate/frame/babe/src/randomness.rs b/substrate/frame/babe/src/randomness.rs index a7e8b31577681d52ede865475a46701cf3176940..7d18629050213e49ee88fdeb4b69ce68a7a27dda 100644 --- a/substrate/frame/babe/src/randomness.rs +++ b/substrate/frame/babe/src/randomness.rs @@ -21,7 +21,7 @@ use super::{ AuthorVrfRandomness, Config, EpochStart, NextRandomness, Randomness, VRF_OUTPUT_LENGTH, }; -use frame_support::{traits::Randomness as RandomnessT}; +use frame_support::traits::Randomness as RandomnessT; use sp_runtime::traits::Hash; /// Randomness usable by consensus protocols that **depend** upon finality and take action diff --git a/substrate/frame/babe/src/tests.rs b/substrate/frame/babe/src/tests.rs index 520a808ab4a57601ebdeba32a3c56ab1b274a114..00ffc7b4edacfe3f8f1a931047f6049f6d1bc00b 100644 --- a/substrate/frame/babe/src/tests.rs +++ b/substrate/frame/babe/src/tests.rs @@ -19,7 +19,7 @@ use super::{Call, *}; use frame_support::{ - assert_err, assert_ok, assert_noop, + assert_err, assert_noop, assert_ok, traits::{Currency, EstimateNextSessionRotation, OnFinalize}, weights::{GetDispatchInfo, Pays}, }; @@ -29,10 +29,8 @@ use sp_consensus_babe::{AllowedSlots, BabeEpochConfiguration, Slot}; use sp_core::crypto::Pair; const EMPTY_RANDOMNESS: [u8; 32] = [ - 74, 25, 49, 128, 53, 97, 244, 49, - 222, 202, 176, 2, 231, 66, 95, 10, - 133, 49, 213, 228, 86, 161, 164, 127, - 217, 153, 138, 37, 48, 192, 248, 0, + 74, 25, 49, 128, 53, 97, 244, 49, 222, 202, 176, 2, 231, 66, 95, 10, 133, 49, 213, 228, 86, + 161, 164, 127, 217, 153, 138, 37, 48, 192, 248, 0, ]; #[test] @@ -43,17 +41,17 @@ fn empty_randomness_is_correct() { #[test] fn initial_values() { - new_test_ext(4).execute_with(|| { - assert_eq!(Babe::authorities().len(), 4) - }) + new_test_ext(4).execute_with(|| assert_eq!(Babe::authorities().len(), 4)) } #[test] fn check_module() { 
new_test_ext(4).execute_with(|| { assert!(!Babe::should_end_session(0), "Genesis does not change sessions"); - assert!(!Babe::should_end_session(200000), - "BABE does not include the block number in epoch calculations"); + assert!( + !Babe::should_end_session(200000), + "BABE does not include the block number in epoch calculations" + ); }) } @@ -66,20 +64,10 @@ fn first_block_epoch_zero_start() { let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); let first_vrf = vrf_output; - let pre_digest = make_primary_pre_digest( - 0, - genesis_slot, - first_vrf.clone(), - vrf_proof, - ); + let pre_digest = make_primary_pre_digest(0, genesis_slot, first_vrf.clone(), vrf_proof); assert_eq!(Babe::genesis_slot(), Slot::from(0)); - System::initialize( - &1, - &Default::default(), - &pre_digest, - Default::default(), - ); + System::initialize(&1, &Default::default(), &pre_digest, Default::default()); // see implementation of the function for details why: we issue an // epoch-change digest but don't do it via the normal session mechanism. @@ -106,7 +94,7 @@ fn first_block_epoch_zero_start() { sp_consensus_babe::digests::NextEpochDescriptor { authorities: Babe::authorities(), randomness: Babe::randomness(), - } + }, ); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); @@ -124,12 +112,7 @@ fn author_vrf_output_for_primary() { let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); let primary_pre_digest = make_primary_pre_digest(0, genesis_slot, vrf_output, vrf_proof); - System::initialize( - &1, - &Default::default(), - &primary_pre_digest, - Default::default(), - ); + System::initialize(&1, &Default::default(), &primary_pre_digest, Default::default()); Babe::do_initialize(1); assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); @@ -147,14 +130,10 @@ fn author_vrf_output_for_secondary_vrf() { ext.execute_with(|| { let genesis_slot = Slot::from(10); let (vrf_output, vrf_proof, vrf_randomness) = make_vrf_output(genesis_slot, &pairs[0]); - let secondary_vrf_pre_digest = make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof); + let secondary_vrf_pre_digest = + make_secondary_vrf_pre_digest(0, genesis_slot, vrf_output, vrf_proof); - System::initialize( - &1, - &Default::default(), - &secondary_vrf_pre_digest, - Default::default(), - ); + System::initialize(&1, &Default::default(), &secondary_vrf_pre_digest, Default::default()); Babe::do_initialize(1); assert_eq!(Babe::author_vrf_randomness(), Some(vrf_randomness)); @@ -192,8 +171,10 @@ fn no_author_vrf_output_for_secondary_plain() { fn authority_index() { new_test_ext(4).execute_with(|| { assert_eq!( - Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), None, - "Trivially invalid authorities are ignored") + Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), + None, + "Trivially invalid authorities are ignored" + ) }) } @@ -237,7 +218,10 @@ fn can_estimate_current_epoch_progress() { Permill::from_percent(100) ); } else { - assert!(Babe::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100)); + assert!( + Babe::estimate_current_session_progress(i).0.unwrap() < + Permill::from_percent(100) + ); } } @@ -287,7 +271,8 @@ fn can_enact_next_config() { c: next_next_config.c, allowed_slots: next_next_config.allowed_slots, }, - ).unwrap(); + ) + .unwrap(); progress_to_block(4); Babe::on_finalize(9); @@ -296,12 +281,11 @@ fn can_enact_next_config() { assert_eq!(EpochConfig::::get(), 
Some(next_config)); assert_eq!(NextEpochConfig::::get(), Some(next_next_config.clone())); - let consensus_log = sp_consensus_babe::ConsensusLog::NextConfigData( - NextConfigDescriptor::V1 { + let consensus_log = + sp_consensus_babe::ConsensusLog::NextConfigData(NextConfigDescriptor::V1 { c: next_next_config.c, allowed_slots: next_next_config.allowed_slots, - } - ); + }); let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); assert_eq!(header.digest.logs[2], consensus_digest.clone()) @@ -313,29 +297,18 @@ fn only_root_can_enact_config_change() { use sp_runtime::DispatchError; new_test_ext(1).execute_with(|| { - let next_config = NextConfigDescriptor::V1 { - c: (1, 4), - allowed_slots: AllowedSlots::PrimarySlots, - }; + let next_config = + NextConfigDescriptor::V1 { c: (1, 4), allowed_slots: AllowedSlots::PrimarySlots }; - let res = Babe::plan_config_change( - Origin::none(), - next_config.clone(), - ); + let res = Babe::plan_config_change(Origin::none(), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change( - Origin::signed(1), - next_config.clone(), - ); + let res = Babe::plan_config_change(Origin::signed(1), next_config.clone()); assert_noop!(res, DispatchError::BadOrigin); - let res = Babe::plan_config_change( - Origin::root(), - next_config, - ); + let res = Babe::plan_config_change(Origin::root(), next_config); assert!(res.is_ok()); }); @@ -350,10 +323,7 @@ fn can_fetch_current_and_next_epoch_data() { }); // genesis authorities should be used for the first and second epoch - assert_eq!( - Babe::current_epoch().authorities, - Babe::next_epoch().authorities, - ); + assert_eq!(Babe::current_epoch().authorities, Babe::next_epoch().authorities,); // 1 era = 3 epochs // 1 epoch = 3 slots // Eras start from 0. @@ -420,11 +390,7 @@ fn report_equivocation_current_session_works() { assert_eq!( Staking::eras_stakers(1, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } @@ -445,10 +411,7 @@ fn report_equivocation_current_session_works() { ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); // report the equivocation @@ -460,35 +423,24 @@ fn report_equivocation_current_session_works() { start_era(2); // check that the balance of offending validator is slashed 100%. - assert_eq!( - Balances::total_balance(&offending_validator_id), - 10_000_000 - 10_000 - ); + assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( Staking::eras_stakers(2, offending_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); // check that the balances of all other validators are left intact. 
for validator in &validators { if *validator == offending_validator_id { - continue; + continue } assert_eq!(Balances::total_balance(validator), 10_000_000); assert_eq!(Staking::slashable_balance_of(validator), 10_000); assert_eq!( Staking::eras_stakers(2, validator), - pallet_staking::Exposure { - total: 10_000, - own: 10_000, - others: vec![], - }, + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, ); } }) @@ -519,10 +471,7 @@ fn report_equivocation_old_session_works() { ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); // start a new era and report the equivocation @@ -531,10 +480,7 @@ fn report_equivocation_old_session_works() { // check the balance of the offending validator assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000); - assert_eq!( - Staking::slashable_balance_of(&offending_validator_id), - 10_000 - ); + assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 10_000); // report the equivocation Babe::report_equivocation_unsigned(Origin::none(), equivocation_proof, key_owner_proof) @@ -545,18 +491,11 @@ fn report_equivocation_old_session_works() { start_era(3); // check that the balance of offending validator is slashed 100%. - assert_eq!( - Balances::total_balance(&offending_validator_id), - 10_000_000 - 10_000 - ); + assert_eq!(Balances::total_balance(&offending_validator_id), 10_000_000 - 10_000); assert_eq!(Staking::slashable_balance_of(&offending_validator_id), 0); assert_eq!( Staking::eras_stakers(3, offending_validator_id), - pallet_staking::Exposure { - total: 0, - own: 0, - others: vec![], - }, + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); }) } @@ -585,10 +524,7 @@ fn report_equivocation_invalid_key_owner_proof() { ); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let mut key_owner_proof = Historical::prove(key).unwrap(); // we change the session index in the key ownership proof @@ -640,10 +576,7 @@ fn report_equivocation_invalid_equivocation_proof() { .unwrap(); // create the key ownership proof - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); let assert_invalid_equivocation = |equivocation_proof| { @@ -753,10 +686,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { CurrentSlot::::get(), ); - let key = ( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - ); + let key = (sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public()); let key_owner_proof = Historical::prove(key).unwrap(); let inner = @@ -815,23 +745,19 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { fn report_equivocation_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. 
- assert!( - (1..=100) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] == w[1]) - ); + assert!((1..=100) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] == w[1])); // after 100 validators the weight should keep increasing // with every extra validator. - assert!( - (100..=1000) - .map(::WeightInfo::report_equivocation) - .collect::>() - .windows(2) - .all(|w| w[0] < w[1]) - ); + assert!((100..=1000) + .map(::WeightInfo::report_equivocation) + .collect::>() + .windows(2) + .all(|w| w[0] < w[1])); } #[test] @@ -848,11 +774,9 @@ fn valid_equivocation_reports_dont_pay_fees() { generate_equivocation_proof(0, &offending_authority_pair, CurrentSlot::::get()); // create the key ownership proof. - let key_owner_proof = Historical::prove(( - sp_consensus_babe::KEY_TYPE, - &offending_authority_pair.public(), - )) - .unwrap(); + let key_owner_proof = + Historical::prove((sp_consensus_babe::KEY_TYPE, &offending_authority_pair.public())) + .unwrap(); // check the dispatch info for the call. let info = Call::::report_equivocation_unsigned( @@ -894,9 +818,7 @@ fn valid_equivocation_reports_dont_pay_fees() { #[test] fn add_epoch_configurations_migration_works() { - use frame_support::storage::migration::{ - put_storage_value, get_storage_value, - }; + use frame_support::storage::migration::{get_storage_value, put_storage_value}; impl crate::migrations::BabePalletPrefix for Test { fn pallet_prefix() -> &'static str { @@ -905,38 +827,31 @@ fn add_epoch_configurations_migration_works() { } new_test_ext(1).execute_with(|| { - let next_config_descriptor = NextConfigDescriptor::V1 { - c: (3, 4), - allowed_slots: AllowedSlots::PrimarySlots - }; + let next_config_descriptor = + NextConfigDescriptor::V1 { c: (3, 4), allowed_slots: AllowedSlots::PrimarySlots }; - put_storage_value( - b"Babe", - b"NextEpochConfig", - &[], - Some(next_config_descriptor.clone()) - ); + put_storage_value(b"Babe", b"NextEpochConfig", &[], Some(next_config_descriptor.clone())); assert!(get_storage_value::>( b"Babe", b"NextEpochConfig", &[], - ).is_some()); + ) + .is_some()); let current_epoch = BabeEpochConfiguration { c: (1, 4), allowed_slots: sp_consensus_babe::AllowedSlots::PrimarySlots, }; - crate::migrations::add_epoch_configuration::( - current_epoch.clone() - ); + crate::migrations::add_epoch_configuration::(current_epoch.clone()); assert!(get_storage_value::>( b"Babe", b"NextEpochConfig", &[], - ).is_none()); + ) + .is_none()); assert_eq!(EpochConfig::::get(), Some(current_epoch)); assert_eq!(PendingEpochConfigChange::::get(), Some(next_config_descriptor)); diff --git a/substrate/frame/balances/src/benchmarking.rs b/substrate/frame/balances/src/benchmarking.rs index 688bcbc262bdbc108f17ed82a5da04f898df7607..97c3c4309a80d4bdb5e1050c1ff570624c30233c 100644 --- a/substrate/frame/balances/src/benchmarking.rs +++ b/substrate/frame/balances/src/benchmarking.rs @@ -21,8 +21,10 @@ use super::*; +use frame_benchmarking::{ + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, +}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks_instance_pallet, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Pallet as Balances; @@ -31,7 +33,6 @@ const SEED: u32 = 0; // existential deposit multiplier const ED_MULTIPLIER: u32 = 10; - benchmarks_instance_pallet! { // Benchmark `transfer` extrinsic with the worst possible conditions: // * Transfer will kill the sender account. 
diff --git a/substrate/frame/balances/src/lib.rs b/substrate/frame/balances/src/lib.rs index 7a092a75b23dbb84f094a659992f3800fdcb78ed..e0f4e1003bbf989355ad90d9b5989e2fac6befd0 100644 --- a/substrate/frame/balances/src/lib.rs +++ b/substrate/frame/balances/src/lib.rs @@ -150,51 +150,58 @@ #[macro_use] mod tests; -mod tests_local; +mod benchmarking; mod tests_composite; +mod tests_local; mod tests_reentrancy; -mod benchmarking; pub mod weights; -use sp_std::prelude::*; -use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr}; -use codec::{Codec, Encode, Decode, MaxEncodedLen}; +pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; +use codec::{Codec, Decode, Encode, MaxEncodedLen}; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; use frame_support::{ - ensure, WeakBoundedVec, + ensure, traits::{ - Currency, OnUnbalanced, TryDrop, StoredMap, - WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, - Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::{AllowDeath, KeepAlive}, - NamedReservableCurrency, - tokens::{fungible, DepositConsequence, WithdrawConsequence, BalanceStatus as Status}, - } + tokens::{fungible, BalanceStatus as Status, DepositConsequence, WithdrawConsequence}, + Currency, ExistenceRequirement, + ExistenceRequirement::{AllowDeath, KeepAlive}, + Get, Imbalance, LockIdentifier, LockableCurrency, NamedReservableCurrency, OnUnbalanced, + ReservableCurrency, SignedImbalance, StoredMap, TryDrop, WithdrawReasons, + }, + WeakBoundedVec, }; -#[cfg(feature = "std")] -use frame_support::traits::GenesisBuild; +use frame_system as system; use sp_runtime::{ - RuntimeDebug, DispatchResult, DispatchError, ArithmeticError, traits::{ - Zero, AtLeast32BitUnsigned, StaticLookup, CheckedAdd, CheckedSub, - MaybeSerializeDeserialize, Saturating, Bounded, + AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, + Saturating, StaticLookup, Zero, }, + ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, }; -use frame_system as system; -pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; +use sp_std::{cmp, fmt::Debug, mem, ops::BitOr, prelude::*, result}; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { /// The balance of an account. - type Balance: Parameter + Member + AtLeast32BitUnsigned + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug + MaxEncodedLen; + type Balance: Parameter + + Member + + AtLeast32BitUnsigned + + Codec + + Default + + Copy + + MaybeSerializeDeserialize + + Debug + + MaxEncodedLen; /// Handler for the unbalanced reduction when removing a dust account. 
type DustRemoval: OnUnbalanced>; @@ -228,7 +235,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::generate_storage_info] - pub struct Pallet(PhantomData<(T, I)>); + pub struct Pallet(PhantomData<(T, I)>); #[pallet::call] impl, I: 'static> Pallet { @@ -267,7 +274,12 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let transactor = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; + >::transfer( + &transactor, + &dest, + value, + ExistenceRequirement::AllowDeath, + )?; Ok(().into()) } @@ -345,7 +357,12 @@ pub mod pallet { ensure_root(origin)?; let source = T::Lookup::lookup(source)?; let dest = T::Lookup::lookup(dest)?; - >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; + >::transfer( + &source, + &dest, + value, + ExistenceRequirement::AllowDeath, + )?; Ok(().into()) } @@ -401,7 +418,12 @@ pub mod pallet { let reducible_balance = Self::reducible_balance(&transactor, keep_alive); let dest = T::Lookup::lookup(dest)?; let keep_alive = if keep_alive { KeepAlive } else { AllowDeath }; - >::transfer(&transactor, &dest, reducible_balance, keep_alive.into())?; + >::transfer( + &transactor, + &dest, + reducible_balance, + keep_alive.into(), + )?; Ok(().into()) } } @@ -496,18 +518,15 @@ pub mod pallet { Blake2_128Concat, T::AccountId, BoundedVec, T::MaxReserves>, - ValueQuery + ValueQuery, >; /// Storage version of the pallet. /// /// This is set to v2.0.0 for new networks. #[pallet::storage] - pub(super) type StorageVersion, I: 'static = ()> = StorageValue< - _, - Releases, - ValueQuery - >; + pub(super) type StorageVersion, I: 'static = ()> = + StorageValue<_, Releases, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { @@ -517,18 +536,14 @@ pub mod pallet { #[cfg(feature = "std")] impl, I: 'static> Default for GenesisConfig { fn default() -> Self { - Self { - balances: Default::default(), - } + Self { balances: Default::default() } } } #[pallet::genesis_build] impl, I: 'static> GenesisBuild for GenesisConfig { fn build(&self) { - let total = self.balances - .iter() - .fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); + let total = self.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n); >::put(total); >::put(Releases::V2_0_0); @@ -541,12 +556,21 @@ pub mod pallet { } // ensure no duplicates exist. - let endowed_accounts = self.balances.iter().map(|(x, _)| x).cloned().collect::>(); + let endowed_accounts = self + .balances + .iter() + .map(|(x, _)| x) + .cloned() + .collect::>(); - assert!(endowed_accounts.len() == self.balances.len(), "duplicate balances in genesis."); + assert!( + endowed_accounts.len() == self.balances.len(), + "duplicate balances in genesis." + ); for &(ref who, free) in self.balances.iter() { - assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }).is_ok()); + assert!(T::AccountStore::insert(who, AccountData { free, ..Default::default() }) + .is_ok()); } } } @@ -564,10 +588,7 @@ impl, I: 'static> GenesisConfig { /// Direct implementation of `GenesisBuild::assimilate_storage`. /// /// Kept in order not to break dependency. 
- pub fn assimilate_storage( - &self, - storage: &mut sp_runtime::Storage - ) -> Result<(), String> { + pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { >::assimilate_storage(self, storage) } } @@ -598,7 +619,9 @@ impl From for Reasons { impl BitOr for Reasons { type Output = Reasons; fn bitor(self, other: Reasons) -> Reasons { - if self == other { return self } + if self == other { + return self + } Reasons::All } } @@ -684,7 +707,9 @@ impl Default for Releases { } } -pub struct DustCleaner, I: 'static = ()>(Option<(T::AccountId, NegativeImbalance)>); +pub struct DustCleaner, I: 'static = ()>( + Option<(T::AccountId, NegativeImbalance)>, +); impl, I: 'static> Drop for DustCleaner { fn drop(&mut self) { @@ -752,7 +777,9 @@ impl, I: 'static> Pallet { amount: T::Balance, account: &AccountData, ) -> DepositConsequence { - if amount.is_zero() { return DepositConsequence::Success } + if amount.is_zero() { + return DepositConsequence::Success + } if TotalIssuance::::get().checked_add(&amount).is_none() { return DepositConsequence::Overflow @@ -778,7 +805,9 @@ impl, I: 'static> Pallet { amount: T::Balance, account: &AccountData, ) -> WithdrawConsequence { - if amount.is_zero() { return WithdrawConsequence::Success } + if amount.is_zero() { + return WithdrawConsequence::Success + } if TotalIssuance::::get().checked_sub(&amount).is_none() { return WithdrawConsequence::Underflow @@ -847,11 +876,10 @@ impl, I: 'static> Pallet { who: &T::AccountId, f: impl FnOnce(&mut AccountData, bool) -> Result, ) -> Result { - Self::try_mutate_account_with_dust(who, f) - .map(|(result, dust_cleaner)| { - drop(dust_cleaner); - result - }) + Self::try_mutate_account_with_dust(who, f).map(|(result, dust_cleaner)| { + drop(dust_cleaner); + result + }) } /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce @@ -944,7 +972,6 @@ impl, I: 'static> Pallet { } } - /// Move the reserved balance of one account into the balance of another, according to `status`. 
/// /// Is a no-op if: @@ -957,13 +984,15 @@ impl, I: 'static> Pallet { best_effort: bool, status: Status, ) -> Result { - if value.is_zero() { return Ok(Zero::zero()) } + if value.is_zero() { + return Ok(Zero::zero()) + } if slashed == beneficiary { return match status { Status::Free => Ok(Self::unreserve(slashed, value)), Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - }; + } } let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( @@ -976,21 +1005,30 @@ impl, I: 'static> Pallet { let actual = cmp::min(from_account.reserved, value); ensure!(best_effort || actual == value, Error::::InsufficientBalance); match status { - Status::Free => to_account.free = to_account.free - .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved - .checked_add(&actual) - .ok_or(ArithmeticError::Overflow)?, + Status::Free => + to_account.free = to_account + .free + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, + Status::Reserved => + to_account.reserved = to_account + .reserved + .checked_add(&actual) + .ok_or(ArithmeticError::Overflow)?, } from_account.reserved -= actual; Ok(actual) - } + }, ) - } + }, )?; - Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); + Self::deposit_event(Event::ReserveRepatriated( + slashed.clone(), + beneficiary.clone(), + actual, + status, + )); Ok(actual) } } @@ -1016,21 +1054,27 @@ impl, I: 'static> fungible::Inspect for Pallet } else { // `must_remain_to_exist` is the part of liquid balance which must remain to keep total over // ED. - let must_remain_to_exist = T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); + let must_remain_to_exist = + T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); liquid.saturating_sub(must_remain_to_exist) } } fn can_deposit(who: &T::AccountId, amount: Self::Balance) -> DepositConsequence { Self::deposit_consequence(who, amount, &Self::account(who)) } - fn can_withdraw(who: &T::AccountId, amount: Self::Balance) -> WithdrawConsequence { + fn can_withdraw( + who: &T::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { Self::withdraw_consequence(who, amount, &Self::account(who)) } } impl, I: 'static> fungible::Mutate for Pallet { fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - if amount.is_zero() { return Ok(()) } + if amount.is_zero() { + return Ok(()) + } Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { Self::deposit_consequence(who, amount, &account).into_result()?; account.free += amount; @@ -1040,14 +1084,22 @@ impl, I: 'static> fungible::Mutate for Pallet { Ok(()) } - fn burn_from(who: &T::AccountId, amount: Self::Balance) -> Result { - if amount.is_zero() { return Ok(Self::Balance::zero()); } - let actual = Self::try_mutate_account(who, |account, _is_new| -> Result { - let extra = Self::withdraw_consequence(who, amount, &account).into_result()?; - let actual = amount + extra; - account.free -= actual; - Ok(actual) - })?; + fn burn_from( + who: &T::AccountId, + amount: Self::Balance, + ) -> Result { + if amount.is_zero() { + return Ok(Self::Balance::zero()) + } + let actual = Self::try_mutate_account( + who, + |account, _is_new| -> Result { + let extra = Self::withdraw_consequence(who, amount, &account).into_result()?; + let actual = amount + extra; + account.free -= actual; + Ok(actual) + }, + )?; TotalIssuance::::mutate(|t| *t -= actual); Ok(actual) } @@ 
-1061,8 +1113,7 @@ impl, I: 'static> fungible::Transfer for Pallet keep_alive: bool, ) -> Result { let er = if keep_alive { KeepAlive } else { AllowDeath }; - >::transfer(source, dest, amount, er) - .map(|_| amount) + >::transfer(source, dest, amount, er).map(|_| amount) } } @@ -1084,7 +1135,9 @@ impl, I: 'static> fungible::InspectHold for Pallet bool { let a = Self::account(who); let min_balance = T::ExistentialDeposit::get().max(a.frozen(Reasons::All)); - if a.reserved.checked_add(&amount).is_none() { return false } + if a.reserved.checked_add(&amount).is_none() { + return false + } // We require it to be min_balance + amount to ensure that the full reserved funds may be // slashed without compromising locked funds or destroying the account. let required_free = match min_balance.checked_add(&amount) { @@ -1096,7 +1149,9 @@ impl, I: 'static> fungible::InspectHold for Pallet, I: 'static> fungible::MutateHold for Pallet { fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - if amount.is_zero() { return Ok(()) } + if amount.is_zero() { + return Ok(()) + } ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); Self::mutate_account(who, |a| { a.free -= amount; @@ -1104,10 +1159,14 @@ impl, I: 'static> fungible::MutateHold for Pallet Result - { - if amount.is_zero() { return Ok(amount) } + fn release( + who: &T::AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result { + if amount.is_zero() { + return Ok(amount) + } // Done on a best-effort basis. Self::try_mutate_account(who, |a, _| { let new_free = a.free.saturating_add(amount.min(a.reserved)); @@ -1134,12 +1193,9 @@ impl, I: 'static> fungible::MutateHold for Pallet, I: 'static> Drop for PositiveImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - >::mutate( - |v| *v = v.saturating_add(self.0) - ); + >::mutate(|v| *v = v.saturating_add(self.0)); } } impl, I: 'static> Drop for NegativeImbalance { /// Basic drop handler will just square up the total issuance. fn drop(&mut self) { - >::mutate( - |v| *v = v.saturating_sub(self.0) - ); + >::mutate(|v| *v = v.saturating_sub(self.0)); } } } -impl, I: 'static> Currency for Pallet where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: 'static> Currency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, { type Balance = T::Balance; type PositiveImbalance = PositiveImbalance; @@ -1317,7 +1370,9 @@ impl, I: 'static> Currency for Pallet where // Check if `value` amount of free balance can be slashed from `who`. fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } + if value.is_zero() { + return true + } Self::free_balance(who) >= value } @@ -1332,7 +1387,9 @@ impl, I: 'static> Currency for Pallet where // Burn funds from the total issuance, returning a positive imbalance for the amount burned. // Is a no-op if amount to be burned is zero. fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - if amount.is_zero() { return PositiveImbalance::zero() } + if amount.is_zero() { + return PositiveImbalance::zero() + } >::mutate(|issued| { *issued = issued.checked_sub(&amount).unwrap_or_else(|| { amount = *issued; @@ -1346,13 +1403,15 @@ impl, I: 'static> Currency for Pallet where // for the amount issued. // Is a no-op if amount to be issued is zero.
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - if amount.is_zero() { return NegativeImbalance::zero() } - >::mutate(|issued| + if amount.is_zero() { + return NegativeImbalance::zero() + } + >::mutate(|issued| { *issued = issued.checked_add(&amount).unwrap_or_else(|| { amount = Self::Balance::max_value() - *issued; Self::Balance::max_value() }) - ); + }); NegativeImbalance::new(amount) } @@ -1374,7 +1433,9 @@ impl, I: 'static> Currency for Pallet where reasons: WithdrawReasons, new_balance: T::Balance, ) -> DispatchResult { - if amount.is_zero() { return Ok(()) } + if amount.is_zero() { + return Ok(()) + } let min_balance = Self::account(who).frozen(reasons.into()); ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); Ok(()) @@ -1388,7 +1449,9 @@ impl, I: 'static> Currency for Pallet where value: Self::Balance, existence_requirement: ExistenceRequirement, ) -> DispatchResult { - if value.is_zero() || transactor == dest { return Ok(()) } + if value.is_zero() || transactor == dest { + return Ok(()) + } Self::try_mutate_account_with_dust( dest, @@ -1396,12 +1459,15 @@ impl, I: 'static> Currency for Pallet where Self::try_mutate_account_with_dust( transactor, |from_account, _| -> DispatchResult { - from_account.free = from_account.free.checked_sub(&value) + from_account.free = from_account + .free + .checked_sub(&value) .ok_or(Error::::InsufficientBalance)?; // NOTE: total stake being stored in the same type means that this could never overflow // but better to be safe than sorry. - to_account.free = to_account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + to_account.free = + to_account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; let ed = T::ExistentialDeposit::get(); ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); @@ -1411,18 +1477,24 @@ impl, I: 'static> Currency for Pallet where value, WithdrawReasons::TRANSFER, from_account.free, - ).map_err(|_| Error::::LiquidityRestrictions)?; + ) + .map_err(|_| Error::::LiquidityRestrictions)?; // TODO: This is over-conservative. There may now be other providers, and this pallet // may not even be a provider. let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = allow_death && !system::Pallet::::is_provider_required(transactor); - ensure!(allow_death || from_account.total() >= ed, Error::::KeepAlive); + let allow_death = + allow_death && !system::Pallet::::is_provider_required(transactor); + ensure!( + allow_death || from_account.total() >= ed, + Error::::KeepAlive + ); Ok(()) - } - ).map(|(_, maybe_dust_cleaner)| maybe_dust_cleaner) - } + }, + ) + .map(|(_, maybe_dust_cleaner)| maybe_dust_cleaner) + }, )?; // Emit transfer event. @@ -1440,23 +1512,30 @@ impl, I: 'static> Currency for Pallet where /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having /// to draw from reserved funds, however we err on the side of punishment if things are inconsistent /// or `can_slash` wasn't used appropriately. 
- fn slash( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - if Self::total_balance(&who).is_zero() { return (NegativeImbalance::zero(), value) } + fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(&who).is_zero() { + return (NegativeImbalance::zero(), value) + } for attempt in 0..2 { - match Self::try_mutate_account(who, - |account, _is_new| -> Result<(Self::NegativeImbalance, Self::Balance), DispatchError> { + match Self::try_mutate_account( + who, + |account, + _is_new| + -> Result<(Self::NegativeImbalance, Self::Balance), DispatchError> { // Best value is the most amount we can slash following liveness rules. let best_value = match attempt { // First attempt we try to slash the full amount, and see if liveness issues happen. 0 => value, // If acting as a critical provider (i.e. first attempt failed), then slash // as much as possible while leaving at least at ED. - _ => value.min((account.free + account.reserved).saturating_sub(T::ExistentialDeposit::get())), + _ => value.min( + (account.free + account.reserved) + .saturating_sub(T::ExistentialDeposit::get()), + ), }; let free_slash = cmp::min(account.free, best_value); @@ -1469,7 +1548,7 @@ impl, I: 'static> Currency for Pallet where account.reserved -= reserved_slash; // Safe because of above check Ok(( NegativeImbalance::new(free_slash + reserved_slash), - value - free_slash - reserved_slash, // Safe because value is gt or eq total slashed + value - free_slash - reserved_slash, /* Safe because value is gt or eq total slashed */ )) } else { // Else we are done! @@ -1478,7 +1557,7 @@ impl, I: 'static> Currency for Pallet where value - free_slash, // Safe because value is gt or eq to total slashed )) } - } + }, ) { Ok(r) => return r, Err(_) => (), @@ -1494,15 +1573,20 @@ impl, I: 'static> Currency for Pallet where /// Is a no-op if the `value` to be deposited is zero. fn deposit_into_existing( who: &T::AccountId, - value: Self::Balance + value: Self::Balance, ) -> Result { - if value.is_zero() { return Ok(PositiveImbalance::zero()) } + if value.is_zero() { + return Ok(PositiveImbalance::zero()) + } - Self::try_mutate_account(who, |account, is_new| -> Result { - ensure!(!is_new, Error::::DeadAccount); - account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; - Ok(PositiveImbalance::new(value)) - }) + Self::try_mutate_account( + who, + |account, is_new| -> Result { + ensure!(!is_new, Error::::DeadAccount); + account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Ok(PositiveImbalance::new(value)) + }, + ) } /// Deposit some `value` into the free balance of `who`, possibly creating a new account. @@ -1512,26 +1596,28 @@ impl, I: 'static> Currency for Pallet where /// - the `value` to be deposited is less than the required ED and the account does not yet exist; or /// - the deposit would necessitate the account to exist and there are no provider references; or /// - `value` is so large it would cause the balance of `who` to overflow. 
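// Illustrative sketch, not part of the upstream diff: the existential-deposit
// gate applied by `deposit_creating` just below, with plain integers and
// failures surfaced as a zero imbalance, mirroring the pallet's
// `unwrap_or_else` fallback.
fn deposit_creating_sketch(free: &mut u128, is_new: bool, value: u128, ed: u128) -> u128 {
    if value == 0 {
        return 0
    }
    if is_new && value < ed {
        return 0 // Error::ExistentialDeposit, reported as a zero imbalance
    }
    // Defensive only: on overflow the whole operation becomes a no-op.
    match free.checked_add(value) {
        Some(x) => {
            *free = x;
            value // the `PositiveImbalance` created
        },
        None => 0,
    }
}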
- fn deposit_creating( - who: &T::AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance { - if value.is_zero() { return Self::PositiveImbalance::zero() } - - let r = Self::try_mutate_account(who, |account, is_new| -> Result { - - let ed = T::ExistentialDeposit::get(); - ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); + fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { + if value.is_zero() { + return Self::PositiveImbalance::zero() + } - // defensive only: overflow should never happen, however in case it does, then this - // operation is a no-op. - account.free = match account.free.checked_add(&value) { - Some(x) => x, - None => return Ok(Self::PositiveImbalance::zero()), - }; + let r = Self::try_mutate_account( + who, + |account, is_new| -> Result { + let ed = T::ExistentialDeposit::get(); + ensure!(value >= ed || !is_new, Error::::ExistentialDeposit); + + // defensive only: overflow should never happen, however in case it does, then this + // operation is a no-op. + account.free = match account.free.checked_add(&value) { + Some(x) => x, + None => return Ok(Self::PositiveImbalance::zero()), + }; - Ok(PositiveImbalance::new(value)) - }).unwrap_or_else(|_| Self::PositiveImbalance::zero()); + Ok(PositiveImbalance::new(value)) + }, + ) + .unwrap_or_else(|_| Self::PositiveImbalance::zero()); r } @@ -1545,70 +1631,79 @@ impl, I: 'static> Currency for Pallet where reasons: WithdrawReasons, liveness: ExistenceRequirement, ) -> result::Result { - if value.is_zero() { return Ok(NegativeImbalance::zero()); } + if value.is_zero() { + return Ok(NegativeImbalance::zero()) + } - Self::try_mutate_account(who, |account, _| - -> Result - { - let new_free_account = account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; + Self::try_mutate_account( + who, + |account, _| -> Result { + let new_free_account = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - // bail if we need to keep the account alive and this would kill it. - let ed = T::ExistentialDeposit::get(); - let would_be_dead = new_free_account + account.reserved < ed; - let would_kill = would_be_dead && account.free + account.reserved >= ed; - ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); + // bail if we need to keep the account alive and this would kill it. + let ed = T::ExistentialDeposit::get(); + let would_be_dead = new_free_account + account.reserved < ed; + let would_kill = would_be_dead && account.free + account.reserved >= ed; + ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); - Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; + Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; - account.free = new_free_account; + account.free = new_free_account; - Ok(NegativeImbalance::new(value)) - }) + Ok(NegativeImbalance::new(value)) + }, + ) } /// Force the new free balance of a target account `who` to some new value `balance`. - fn make_free_balance_be(who: &T::AccountId, value: Self::Balance) - -> SignedImbalance - { - Self::try_mutate_account(who, |account, is_new| - -> Result, DispatchError> - { - let ed = T::ExistentialDeposit::get(); - let total = value.saturating_add(account.reserved); - // If we're attempting to set an existing account to less than ED, then - // bypass the entire operation. 
It's a no-op if you follow it through, but - // since this is an instance where we might account for a negative imbalance - // (in the dust cleaner of set_account) before we account for its actual - // equal and opposite cause (returned as an Imbalance), then in the - // instance that there's no other accounts on the system at all, we might - // underflow the issuance and our arithmetic will be off. - ensure!(total >= ed || !is_new, Error::::ExistentialDeposit); - - let imbalance = if account.free <= value { - SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) - } else { - SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) - }; - account.free = value; - Ok(imbalance) - }).unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) + fn make_free_balance_be( + who: &T::AccountId, + value: Self::Balance, + ) -> SignedImbalance { + Self::try_mutate_account( + who, + |account, + is_new| + -> Result, DispatchError> { + let ed = T::ExistentialDeposit::get(); + let total = value.saturating_add(account.reserved); + // If we're attempting to set an existing account to less than ED, then + // bypass the entire operation. It's a no-op if you follow it through, but + // since this is an instance where we might account for a negative imbalance + // (in the dust cleaner of set_account) before we account for its actual + // equal and opposite cause (returned as an Imbalance), then in the + // instance that there's no other accounts on the system at all, we might + // underflow the issuance and our arithmetic will be off. + ensure!(total >= ed || !is_new, Error::::ExistentialDeposit); + + let imbalance = if account.free <= value { + SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) + } else { + SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) + }; + account.free = value; + Ok(imbalance) + }, + ) + .unwrap_or_else(|_| SignedImbalance::Positive(Self::PositiveImbalance::zero())) } } -impl, I: 'static> ReservableCurrency for Pallet where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: 'static> ReservableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, { /// Check if `who` can reserve `value` from their free balance. /// /// Always `true` if value to be reserved is zero. fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } - Self::account(who).free - .checked_sub(&value) - .map_or(false, |new_balance| - Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() - ) + if value.is_zero() { + return true + } + Self::account(who).free.checked_sub(&value).map_or(false, |new_balance| { + Self::ensure_can_withdraw(who, value, WithdrawReasons::RESERVE, new_balance).is_ok() + }) } fn reserved_balance(who: &T::AccountId) -> Self::Balance { @@ -1619,11 +1714,15 @@ impl, I: 'static> ReservableCurrency for Pallet /// /// Is a no-op if value to be reserved is zero. 
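// Illustrative sketch, not part of the upstream diff: how
// `make_free_balance_be` above derives its signed imbalance, with the
// imbalance types reduced to a plain enum. The magnitude is always the
// absolute difference between the old and new free balance, signed by the
// direction of the change.
enum SignedImb {
    Positive(u128),
    Negative(u128),
}

fn imbalance_for(old_free: u128, new_free: u128) -> SignedImb {
    if old_free <= new_free {
        SignedImb::Positive(new_free - old_free)
    } else {
        SignedImb::Negative(old_free - new_free)
    }
}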
fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { - if value.is_zero() { return Ok(()) } + if value.is_zero() { + return Ok(()) + } Self::try_mutate_account(who, |account, _| -> DispatchResult { - account.free = account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - account.reserved = account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + account.free = + account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; + account.reserved = + account.reserved.checked_add(&value).ok_or(ArithmeticError::Overflow)?; Self::ensure_can_withdraw(&who, value.clone(), WithdrawReasons::RESERVE, account.free) })?; @@ -1635,8 +1734,12 @@ impl, I: 'static> ReservableCurrency for Pallet /// /// Is a no-op if the value to be unreserved is zero or the account does not exist. fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - if value.is_zero() { return Zero::zero() } - if Self::total_balance(&who).is_zero() { return value } + if value.is_zero() { + return Zero::zero() + } + if Self::total_balance(&who).is_zero() { + return value + } let actual = match Self::mutate_account(who, |account| { let actual = cmp::min(account.reserved, value); @@ -1652,7 +1755,7 @@ impl, I: 'static> ReservableCurrency for Pallet // If it ever does, then we should fail gracefully though, indicating that nothing // could be done. return value - } + }, }; Self::deposit_event(Event::Unreserved(who.clone(), actual.clone())); @@ -1665,10 +1768,14 @@ impl, I: 'static> ReservableCurrency for Pallet /// Is a no-op if the value to be slashed is zero or the account does not exist. fn slash_reserved( who: &T::AccountId, - value: Self::Balance + value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - if Self::total_balance(&who).is_zero() { return (NegativeImbalance::zero(), value) } + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } + if Self::total_balance(&who).is_zero() { + return (NegativeImbalance::zero(), value) + } // NOTE: `mutate_account` may fail if it attempts to reduce the balance to the point that an // account is attempted to be illegally destroyed. @@ -1679,7 +1786,10 @@ impl, I: 'static> ReservableCurrency for Pallet 0 => value, // If acting as a critical provider (i.e. first attempt failed), then ensure // slash leaves at least the ED. - _ => value.min((account.free + account.reserved).saturating_sub(T::ExistentialDeposit::get())), + _ => value.min( + (account.free + account.reserved) + .saturating_sub(T::ExistentialDeposit::get()), + ), }; let actual = cmp::min(account.reserved, best_value); @@ -1713,8 +1823,9 @@ impl, I: 'static> ReservableCurrency for Pallet } } -impl, I: 'static> NamedReservableCurrency for Pallet where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: 'static> NamedReservableCurrency for Pallet +where + T::Balance: MaybeSerializeDeserialize + Debug, { type ReserveIdentifier = T::ReserveIdentifier; @@ -1729,8 +1840,14 @@ impl, I: 'static> NamedReservableCurrency for Pallet< /// Move `value` from the free balance from `who` to a named reserve balance. /// /// Is a no-op if value to be reserved is zero. 
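// Illustrative sketch, not part of the upstream diff: the clamping performed
// by `unreserve` above, with plain integers. Only `min(reserved, value)` can
// actually move back to the free balance; the shortfall is returned to the
// caller, exactly as the trait contract describes.
fn unreserve_sketch(free: &mut u128, reserved: &mut u128, value: u128) -> u128 {
    let actual = value.min(*reserved);
    *reserved -= actual;
    *free = free.saturating_add(actual); // the total balance cannot overflow here
    value - actual // the amount that could not be unreserved
}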
- fn reserve_named(id: &Self::ReserveIdentifier, who: &T::AccountId, value: Self::Balance) -> DispatchResult { - if value.is_zero() { return Ok(()) } + fn reserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> DispatchResult { + if value.is_zero() { + return Ok(()) + } Reserves::::try_mutate(who, |reserves| -> DispatchResult { match reserves.binary_search_by_key(id, |data| data.id) { @@ -1739,10 +1856,9 @@ impl, I: 'static> NamedReservableCurrency for Pallet< reserves[index].amount = reserves[index].amount.saturating_add(value); }, Err(index) => { - reserves.try_insert(index, ReserveData { - id: id.clone(), - amount: value - }).map_err(|_| Error::::TooManyReserves)?; + reserves + .try_insert(index, ReserveData { id: id.clone(), amount: value }) + .map_err(|_| Error::::TooManyReserves)?; }, }; >::reserve(who, value)?; @@ -1753,8 +1869,14 @@ impl, I: 'static> NamedReservableCurrency for Pallet< /// Unreserve some funds, returning any amount that was unable to be unreserved. /// /// Is a no-op if the value to be unreserved is zero. - fn unreserve_named(id: &Self::ReserveIdentifier, who: &T::AccountId, value: Self::Balance) -> Self::Balance { - if value.is_zero() { return Zero::zero() } + fn unreserve_named( + id: &Self::ReserveIdentifier, + who: &T::AccountId, + value: Self::Balance, + ) -> Self::Balance { + if value.is_zero() { + return Zero::zero() + } Reserves::::mutate_exists(who, |maybe_reserves| -> Self::Balance { if let Some(reserves) = maybe_reserves.as_mut() { @@ -1782,9 +1904,7 @@ impl, I: 'static> NamedReservableCurrency for Pallet< value - actual }, - Err(_) => { - value - }, + Err(_) => value, } } else { value @@ -1799,16 +1919,19 @@ impl, I: 'static> NamedReservableCurrency for Pallet< fn slash_reserved_named( id: &Self::ReserveIdentifier, who: &T::AccountId, - value: Self::Balance + value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()) + } Reserves::::mutate(who, |reserves| -> (Self::NegativeImbalance, Self::Balance) { match reserves.binary_search_by_key(id, |data| data.id) { Ok(index) => { let to_change = cmp::min(reserves[index].amount, value); - let (imb, remain) = >::slash_reserved(who, to_change); + let (imb, remain) = + >::slash_reserved(who, to_change); // remain should always be zero but just to be defensive here let actual = to_change.saturating_sub(remain); @@ -1818,9 +1941,7 @@ impl, I: 'static> NamedReservableCurrency for Pallet< (imb, value - actual) }, - Err(_) => { - (NegativeImbalance::zero(), value) - }, + Err(_) => (NegativeImbalance::zero(), value), } }) } @@ -1838,13 +1959,16 @@ impl, I: 'static> NamedReservableCurrency for Pallet< value: Self::Balance, status: Status, ) -> Result { - if value.is_zero() { return Ok(Zero::zero()) } + if value.is_zero() { + return Ok(Zero::zero()) + } if slashed == beneficiary { return match status { Status::Free => Ok(Self::unreserve_named(id, slashed, value)), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), - }; + Status::Reserved => + Ok(value.saturating_sub(Self::reserved_balance_named(id, slashed))), + } } Reserves::::try_mutate(slashed, |reserves| -> Result { @@ -1854,36 +1978,59 @@ impl, I: 'static> NamedReservableCurrency for Pallet< let actual = if status == Status::Reserved { // make it the reserved under same identifier - Reserves::::try_mutate(beneficiary, |reserves| 
-> Result { - match reserves.binary_search_by_key(id, |data| data.id) { - Ok(index) => { - let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; - - // remain should always be zero but just to be defensive here - let actual = to_change.saturating_sub(remain); - - // this add can't overflow but just to be defensive. - reserves[index].amount = reserves[index].amount.saturating_add(actual); - - Ok(actual) - }, - Err(index) => { - let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; - - // remain should always be zero but just to be defensive here - let actual = to_change.saturating_sub(remain); - - reserves.try_insert(index, ReserveData { - id: id.clone(), - amount: actual - }).map_err(|_| Error::::TooManyReserves)?; - - Ok(actual) - }, - } - })? + Reserves::::try_mutate( + beneficiary, + |reserves| -> Result { + match reserves.binary_search_by_key(id, |data| data.id) { + Ok(index) => { + let remain = + >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + // this add can't overflow but just to be defensive. + reserves[index].amount = + reserves[index].amount.saturating_add(actual); + + Ok(actual) + }, + Err(index) => { + let remain = + >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; + + // remain should always be zero but just to be defensive here + let actual = to_change.saturating_sub(remain); + + reserves + .try_insert( + index, + ReserveData { id: id.clone(), amount: actual }, + ) + .map_err(|_| Error::::TooManyReserves)?; + + Ok(actual) + }, + } + }, + )? } else { - let remain = >::repatriate_reserved(slashed, beneficiary, to_change, status)?; + let remain = >::repatriate_reserved( + slashed, + beneficiary, + to_change, + status, + )?; // remain should always be zero but just to be defensive here to_change.saturating_sub(remain) @@ -1894,9 +2041,7 @@ impl, I: 'static> NamedReservableCurrency for Pallet< Ok(value - actual) }, - Err(_) => { - Ok(value) - }, + Err(_) => Ok(value), } }) } @@ -1904,7 +2049,7 @@ impl, I: 'static> NamedReservableCurrency for Pallet< impl, I: 'static> LockableCurrency for Pallet where - T::Balance: MaybeSerializeDeserialize + Debug + T::Balance: MaybeSerializeDeserialize + Debug, { type Moment = T::BlockNumber; @@ -1918,9 +2063,12 @@ where amount: T::Balance, reasons: WithdrawReasons, ) { - if amount.is_zero() || reasons.is_empty() { return } + if amount.is_zero() || reasons.is_empty() { + return + } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter() + let mut locks = Self::locks(who) + .into_iter() .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) .collect::>(); if let Some(lock) = new_lock { @@ -1937,30 +2085,31 @@ where amount: T::Balance, reasons: WithdrawReasons, ) { - if amount.is_zero() || reasons.is_empty() { return } + if amount.is_zero() || reasons.is_empty() { + return + } let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take().map(|nl| { - BalanceLock { + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { id: l.id, amount: l.amount.max(nl.amount), reasons: l.reasons | nl.reasons, - } - }) - } else { - Some(l) - }).collect::>(); + }) + } else { + 
Some(l) + } + }) + .collect::>(); if let Some(lock) = new_lock { locks.push(lock) } Self::update_locks(who, &locks[..]); } - fn remove_lock( - id: LockIdentifier, - who: &T::AccountId, - ) { + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { let mut locks = Self::locks(who); locks.retain(|l| l.id != id); Self::update_locks(who, &locks[..]); diff --git a/substrate/frame/balances/src/tests_composite.rs b/substrate/frame/balances/src/tests_composite.rs index 1d90b3e70b924b35bbd4844fbbea6fdc42874ce4..e2d50e8b88aad1a7379b1d7beb6f7302d0c6b43c 100644 --- a/substrate/frame/balances/src/tests_composite.rs +++ b/substrate/frame/balances/src/tests_composite.rs @@ -19,19 +19,15 @@ #![cfg(test)] -use sp_runtime::{ - traits::IdentityLookup, - testing::Header, +use crate::{self as pallet_balances, decl_tests, Config, Pallet}; +use frame_support::{ + parameter_types, + weights::{DispatchInfo, IdentityFee, Weight}, }; +use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_io; -use frame_support::parameter_types; -use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use pallet_transaction_payment::CurrencyAdapter; -use crate::{ - self as pallet_balances, - Pallet, Config, decl_tests, -}; +use sp_runtime::{testing::Header, traits::IdentityLookup}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -110,10 +106,7 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } + Self { existential_deposit: 1, monied: false } } } impl ExtBuilder { @@ -138,12 +131,14 @@ impl ExtBuilder { (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) + (12, 10 * self.existential_deposit), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); @@ -151,4 +146,4 @@ impl ExtBuilder { } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! 
{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } diff --git a/substrate/frame/balances/src/tests_local.rs b/substrate/frame/balances/src/tests_local.rs index 36351252b445c821e5d86c0c0dd861d33e582a6c..668c335376c60434890cf50bdbcddb0ec377ba17 100644 --- a/substrate/frame/balances/src/tests_local.rs +++ b/substrate/frame/balances/src/tests_local.rs @@ -19,20 +19,16 @@ #![cfg(test)] -use sp_runtime::{ - traits::IdentityLookup, - testing::Header, +use crate::{self as pallet_balances, decl_tests, Config, Pallet}; +use frame_support::{ + parameter_types, + traits::StorageMapShim, + weights::{DispatchInfo, IdentityFee, Weight}, }; +use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_io; -use frame_support::parameter_types; -use frame_support::traits::StorageMapShim; -use frame_support::weights::{Weight, DispatchInfo, IdentityFee}; -use crate::{ - self as pallet_balances, - Pallet, Config, decl_tests, -}; -use pallet_transaction_payment::CurrencyAdapter; +use sp_runtime::{testing::Header, traits::IdentityLookup}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -98,12 +94,8 @@ impl Config for Test { type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = StorageMapShim< - super::Account, - system::Provider, - u64, - super::AccountData, - >; + type AccountStore = + StorageMapShim, system::Provider, u64, super::AccountData>; type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; @@ -116,10 +108,7 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } + Self { existential_deposit: 1, monied: false } } } impl ExtBuilder { @@ -147,12 +136,14 @@ impl ExtBuilder { (2, 20 * self.existential_deposit), (3, 30 * self.existential_deposit), (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) + (12, 10 * self.existential_deposit), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); @@ -160,40 +151,37 @@ impl ExtBuilder { } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! 
{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } #[test] fn emit_events_with_no_existential_deposit_suicide_with_dust() { - ::default() - .existential_deposit(2) - .build() - .execute_with(|| { - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); - - assert_eq!( - events(), - [ - Event::System(system::Event::NewAccount(1)), - Event::Balances(crate::Event::Endowed(1, 100)), - Event::Balances(crate::Event::BalanceSet(1, 100, 0)), - ] - ); - - let res = Balances::slash(&1, 98); - assert_eq!(res, (NegativeImbalance::new(98), 0)); - - // no events - assert_eq!(events(), []); - - let res = Balances::slash(&1, 1); - assert_eq!(res, (NegativeImbalance::new(1), 0)); - - assert_eq!( - events(), - [ - Event::System(system::Event::KilledAccount(1)), - Event::Balances(crate::Event::DustLost(1, 1)), - ] - ); - }); + ::default().existential_deposit(2).build().execute_with(|| { + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 100, 0)); + + assert_eq!( + events(), + [ + Event::System(system::Event::NewAccount(1)), + Event::Balances(crate::Event::Endowed(1, 100)), + Event::Balances(crate::Event::BalanceSet(1, 100, 0)), + ] + ); + + let res = Balances::slash(&1, 98); + assert_eq!(res, (NegativeImbalance::new(98), 0)); + + // no events + assert_eq!(events(), []); + + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + + assert_eq!( + events(), + [ + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::DustLost(1, 1)), + ] + ); + }); } diff --git a/substrate/frame/balances/src/tests_reentrancy.rs b/substrate/frame/balances/src/tests_reentrancy.rs index 2a3a60dfde842baaea163b6fd817184ff826889a..8682949b2c55d02888c6d50147f01b87f0349020 100644 --- a/substrate/frame/balances/src/tests_reentrancy.rs +++ b/substrate/frame/balances/src/tests_reentrancy.rs @@ -19,27 +19,17 @@ #![cfg(test)] -use sp_runtime::{ - traits::IdentityLookup, - testing::Header, -}; +use crate::{self as pallet_balances, Config, Pallet}; +use frame_support::{parameter_types, traits::StorageMapShim, weights::IdentityFee}; +use pallet_transaction_payment::CurrencyAdapter; use sp_core::H256; use sp_io; -use frame_support::parameter_types; -use frame_support::traits::StorageMapShim; -use frame_support::weights::{IdentityFee}; -use crate::{ - self as pallet_balances, - Pallet, Config, -}; -use pallet_transaction_payment::CurrencyAdapter; +use sp_runtime::{testing::Header, traits::IdentityLookup}; use crate::*; use frame_support::{ assert_ok, - traits::{ - Currency, ReservableCurrency, - } + traits::{Currency, ReservableCurrency}, }; use frame_system::RawOrigin; @@ -113,12 +103,8 @@ impl Config for Test { type DustRemoval = OnDustRemoval; type Event = Event; type ExistentialDeposit = ExistentialDeposit; - type AccountStore = StorageMapShim< - super::Account, - system::Provider, - u64, - super::AccountData, - >; + type AccountStore = + StorageMapShim, system::Provider, u64, super::AccountData>; type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; @@ -130,13 +116,10 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - } + Self { existential_deposit: 1 } } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { self.existential_deposit = existential_deposit; self @@ -149,9 +132,9 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = 
frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![] } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -160,112 +143,103 @@ impl ExtBuilder { #[test] fn transfer_dust_removal_tst1_should_work() { - ExtBuilder::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // In this transaction, account 2 free balance - // drops below existential balance - // and dust balance is removed from account 2 - assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 3, 450)); - - // As expected dust balance is removed. - assert_eq!(Balances::free_balance(&2), 0); - - // As expected beneficiary account 3 - // received the transfered fund. - assert_eq!(Balances::free_balance(&3), 450); - - // Dust balance is deposited to account 1 - // during the process of dust removal. - assert_eq!(Balances::free_balance(&1), 1050); - - // Verify the events - // Number of events expected is 8 - assert_eq!(System::events().len(), 11); - - System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450))); - System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); - } - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // In this transaction, account 2's free balance + // drops below the existential balance + // and the dust balance is removed from account 2 + assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 3, 450)); + + // As expected, dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // As expected, beneficiary account 3 + // received the transferred fund. + assert_eq!(Balances::free_balance(&3), 450); + + // Dust balance is deposited to account 1 + // during the process of dust removal. + assert_eq!(Balances::free_balance(&1), 1050); + + // Verify the events + // Number of events expected is 11 + assert_eq!(System::events().len(), 11); + + System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450))); + System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); + }); } #[test] fn transfer_dust_removal_tst2_should_work() { - ExtBuilder::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // In this transaction, account 2 free balance - // drops below existential balance - // and dust balance is removed from account 2 - assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 1, 450)); - - // As expected dust balance is removed. - assert_eq!(Balances::free_balance(&2), 0); - - // Dust balance is deposited to account 1 - // during the process of dust removal.
- assert_eq!(Balances::free_balance(&1), 1500); - - // Verify the events - // Number of events expected is 8 - assert_eq!(System::events().len(), 9); - - System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450))); - System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); - } - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // In this transaction, account 2's free balance + // drops below the existential balance + // and the dust balance is removed from account 2 + assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 1, 450)); + + // As expected, dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // Dust balance is deposited to account 1 + // during the process of dust removal. + assert_eq!(Balances::free_balance(&1), 1500); + + // Verify the events + // Number of events expected is 9 + assert_eq!(System::events().len(), 9); + + System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450))); + System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); + }); } #[test] fn repatriating_reserved_balance_dust_removal_should_work() { - ExtBuilder::default() - .existential_deposit(100) - .build() - .execute_with(|| { - // Verification of reentrancy in dust removal - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); - assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); - - // Reserve a value on account 2, - // Such that free balance is lower than - // Exestintial deposit. - assert_ok!(Balances::reserve(&2, 450)); - - // Transfer of reserved fund from slashed account 2 to - // beneficiary account 1 - assert_ok!(Balances::repatriate_reserved(&2, &1, 450, Status::Free), 0); - - // Since free balance of account 2 is lower than - // existential deposit, dust amount is - // removed from the account 2 - assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(2), 0); - - // account 1 is credited with reserved amount - // together with dust balance during dust - // removal. - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 1500); - - // Verify the events - // Number of events expected is 10 - assert_eq!(System::events().len(), 10); - - System::assert_has_event(Event::Balances( - crate::Event::ReserveRepatriated(2, 1, 450, Status::Free), - )); - - System::assert_last_event(Event::Balances(crate::Event::DustLost(2, 50))); - } - ); + ExtBuilder::default().existential_deposit(100).build().execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // Reserve a value on account 2, + // such that the free balance is lower than + // the existential deposit.
+ assert_ok!(Balances::reserve(&2, 450)); + + // Transfer of reserved fund from slashed account 2 to + // beneficiary account 1 + assert_ok!(Balances::repatriate_reserved(&2, &1, 450, Status::Free), 0); + + // Since free balance of account 2 is lower than + // existential deposit, dust amount is + // removed from the account 2 + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::free_balance(2), 0); + + // account 1 is credited with reserved amount + // together with dust balance during dust + // removal. + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 1500); + + // Verify the events + // Number of events expected is 10 + assert_eq!(System::events().len(), 10); + + System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated( + 2, + 1, + 450, + Status::Free, + ))); + + System::assert_last_event(Event::Balances(crate::Event::DustLost(2, 50))); + }); } diff --git a/substrate/frame/balances/src/weights.rs b/substrate/frame/balances/src/weights.rs index 79e6445dd6bb6624ec12601dddcafaee717a878e..d1e86ce45e4b33dea45789cb0a7024b988f99580 100644 --- a/substrate/frame/balances/src/weights.rs +++ b/substrate/frame/balances/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/benchmarking/src/analysis.rs b/substrate/frame/benchmarking/src/analysis.rs index f37ffba51f3d0d2f7e1e3cdf06e38a84a8b36cf0..fffa6828cede908844009dcbc3ee92e6beaee1e8 100644 --- a/substrate/frame/benchmarking/src/analysis.rs +++ b/substrate/frame/benchmarking/src/analysis.rs @@ -17,10 +17,10 @@ //! Tools for analyzing the benchmark results. -use std::collections::BTreeMap; +use crate::BenchmarkResults; use core::convert::TryFrom; use linregress::{FormulaRegressionBuilder, RegressionDataBuilder}; -use crate::BenchmarkResults; +use std::collections::BTreeMap; pub use linregress::RegressionModel; @@ -63,14 +63,12 @@ impl TryFrom> for AnalysisChoice { fn try_from(s: Option) -> Result { match s { None => Ok(AnalysisChoice::default()), - Some(i) => { - match &i[..] { - "min-squares" | "min_squares" => Ok(AnalysisChoice::MinSquares), - "median-slopes" | "median_slopes" => Ok(AnalysisChoice::MedianSlopes), - "max" => Ok(AnalysisChoice::Max), - _ => Err("invalid analysis string") - } - } + Some(i) => match &i[..] { + "min-squares" | "min_squares" => Ok(AnalysisChoice::MinSquares), + "median-slopes" | "median_slopes" => Ok(AnalysisChoice::MedianSlopes), + "max" => Ok(AnalysisChoice::Max), + _ => Err("invalid analysis string"), + }, } } } @@ -79,17 +77,20 @@ impl Analysis { // Useful for when there are no components, and we just need an median value of the benchmark results. // Note: We choose the median value because it is more robust to outliers. 
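// Illustrative sketch, not part of the upstream diff: the outlier-robust
// median selection that `median_value` just below is built on (u128 samples;
// averaging the two middle values on even-length input is an assumption
// here, since that branch sits outside the visible hunk).
fn median(mut values: Vec<u128>) -> Option<u128> {
    if values.is_empty() {
        return None
    }
    values.sort();
    let mid = values.len() / 2;
    if values.len() % 2 == 0 {
        Some((values[mid - 1] + values[mid]) / 2)
    } else {
        Some(values[mid])
    }
}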
fn median_value(r: &Vec, selector: BenchmarkSelector) -> Option { - if r.is_empty() { return None } + if r.is_empty() { + return None + } - let mut values: Vec = r.iter().map(|result| - match selector { + let mut values: Vec = r + .iter() + .map(|result| match selector { BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, BenchmarkSelector::StorageRootTime => result.storage_root_time, BenchmarkSelector::Reads => result.reads.into(), BenchmarkSelector::Writes => result.writes.into(), BenchmarkSelector::ProofSize => result.proof_size.into(), - } - ).collect(); + }) + .collect(); values.sort(); let mid = values.len() / 2; @@ -104,64 +105,80 @@ impl Analysis { } pub fn median_slopes(r: &Vec, selector: BenchmarkSelector) -> Option { - if r[0].components.is_empty() { return Self::median_value(r, selector) } - - let results = r[0].components.iter().enumerate().map(|(i, &(param, _))| { - let mut counted = BTreeMap::, usize>::new(); - for result in r.iter() { - let mut p = result.components.iter().map(|x| x.1).collect::>(); - p[i] = 0; - *counted.entry(p).or_default() += 1; - } - let others: Vec = counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); - let values = r.iter() - .filter(|v| - v.components.iter() - .map(|x| x.1) - .zip(others.iter()) - .enumerate() - .all(|(j, (v1, v2))| j == i || v1 == *v2) - ).map(|result| { - // Extract the data we are interested in analyzing - let data = match selector { - BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, - BenchmarkSelector::StorageRootTime => result.storage_root_time, - BenchmarkSelector::Reads => result.reads.into(), - BenchmarkSelector::Writes => result.writes.into(), - BenchmarkSelector::ProofSize => result.proof_size.into(), - }; - (result.components[i].1, data) - }) - .collect::>(); - (format!("{:?}", param), i, others, values) - }).collect::>(); - - let models = results.iter().map(|(_, _, _, ref values)| { - let mut slopes = vec![]; - for (i, &(x1, y1)) in values.iter().enumerate() { - for &(x2, y2) in values.iter().skip(i + 1) { - if x1 != x2 { - slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); + if r[0].components.is_empty() { + return Self::median_value(r, selector) + } + + let results = r[0] + .components + .iter() + .enumerate() + .map(|(i, &(param, _))| { + let mut counted = BTreeMap::, usize>::new(); + for result in r.iter() { + let mut p = result.components.iter().map(|x| x.1).collect::>(); + p[i] = 0; + *counted.entry(p).or_default() += 1; + } + let others: Vec = + counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); + let values = r + .iter() + .filter(|v| { + v.components + .iter() + .map(|x| x.1) + .zip(others.iter()) + .enumerate() + .all(|(j, (v1, v2))| j == i || v1 == *v2) + }) + .map(|result| { + // Extract the data we are interested in analyzing + let data = match selector { + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), + }; + (result.components[i].1, data) + }) + .collect::>(); + (format!("{:?}", param), i, others, values) + }) + .collect::>(); + + let models = results + .iter() + .map(|(_, _, _, ref values)| { + let mut slopes = vec![]; + for (i, &(x1, y1)) in values.iter().enumerate() { + for &(x2, y2) in values.iter().skip(i + 1) { + if x1 != x2 { + slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as 
f64)); + } } } - } - slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let slope = slopes[slopes.len() / 2]; + slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let slope = slopes[slopes.len() / 2]; - let mut offsets = vec![]; - for &(x, y) in values.iter() { - offsets.push(y as f64 - slope * x as f64); - } - offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let offset = offsets[offsets.len() / 2]; + let mut offsets = vec![]; + for &(x, y) in values.iter() { + offsets.push(y as f64 - slope * x as f64); + } + offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let offset = offsets[offsets.len() / 2]; - (offset, slope) - }).collect::>(); + (offset, slope) + }) + .collect::>(); - let models = models.iter() + let models = models + .iter() .zip(results.iter()) .map(|((offset, slope), (_, i, others, _))| { - let over = others.iter() + let over = others + .iter() .enumerate() .filter(|(j, _)| j != i) .map(|(j, v)| models[j].1 * *v as f64) @@ -183,18 +200,20 @@ impl Analysis { } pub fn min_squares_iqr(r: &Vec, selector: BenchmarkSelector) -> Option { - if r[0].components.is_empty() { return Self::median_value(r, selector) } + if r[0].components.is_empty() { + return Self::median_value(r, selector) + } let mut results = BTreeMap::, Vec>::new(); for result in r.iter() { let p = result.components.iter().map(|x| x.1).collect::>(); results.entry(p).or_default().push(match selector { - BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, - BenchmarkSelector::StorageRootTime => result.storage_root_time, - BenchmarkSelector::Reads => result.reads.into(), - BenchmarkSelector::Writes => result.writes.into(), - BenchmarkSelector::ProofSize => result.proof_size.into(), - }) + BenchmarkSelector::ExtrinsicTime => result.extrinsic_time, + BenchmarkSelector::StorageRootTime => result.storage_root_time, + BenchmarkSelector::Reads => result.reads.into(), + BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), + }) } for (_, rs) in results.iter_mut() { @@ -203,21 +222,19 @@ impl Analysis { *rs = rs[ql..rs.len() - ql].to_vec(); } - let mut data = vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; + let mut data = + vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; let names = r[0].components.iter().map(|x| format!("{:?}", x.0)).collect::>(); - data.extend(names.iter() - .enumerate() - .map(|(i, p)| ( + data.extend(names.iter().enumerate().map(|(i, p)| { + ( p.as_str(), - results.iter() - .flat_map(|x| Some(x.0[i] as f64) - .into_iter() - .cycle() - .take(x.1.len()) - ).collect::>() - )) - ); + results + .iter() + .flat_map(|x| Some(x.0[i] as f64).into_iter().cycle().take(x.1.len())) + .collect::>(), + ) + })); let data = RegressionDataBuilder::new().build_from(data).ok()?; @@ -227,25 +244,31 @@ impl Analysis { .fit() .ok()?; - let slopes = model.parameters.regressor_values.iter() + let slopes = model + .parameters + .regressor_values + .iter() .enumerate() .map(|(_, x)| (*x + 0.5) as u128) .collect(); - let value_dists = results.iter().map(|(p, vs)| { - // Avoid divide by zero - if vs.len() == 0 { return (p.clone(), 0, 0) } - let total = vs.iter() - .fold(0u128, |acc, v| acc + *v); - let mean = total / vs.len() as u128; - let sum_sq_diff = vs.iter() - .fold(0u128, |acc, v| { + let value_dists = results + .iter() + .map(|(p, vs)| { + // Avoid divide by zero + if vs.len() == 0 { + 
return (p.clone(), 0, 0) + } + let total = vs.iter().fold(0u128, |acc, v| acc + *v); + let mean = total / vs.len() as u128; + let sum_sq_diff = vs.iter().fold(0u128, |acc, v| { let d = mean.max(*v) - mean.min(*v); acc + d * d }); - let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; - (p.clone(), mean, stddev) - }).collect::>(); + let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; + (p.clone(), mean, stddev) + }) + .collect::>(); Some(Self { base: (model.parameters.intercept_value + 0.5) as u128, @@ -261,32 +284,30 @@ impl Analysis { let min_squares = Self::min_squares_iqr(r, selector); if median_slopes.is_none() || min_squares.is_none() { - return None; + return None } let median_slopes = median_slopes.unwrap(); let min_squares = min_squares.unwrap(); let base = median_slopes.base.max(min_squares.base); - let slopes = median_slopes.slopes.into_iter() + let slopes = median_slopes + .slopes + .into_iter() .zip(min_squares.slopes.into_iter()) - .map(|(a, b): (u128, u128)| { a.max(b) }) + .map(|(a, b): (u128, u128)| a.max(b)) .collect::>(); // components should always be in the same order - median_slopes.names.iter() + median_slopes + .names + .iter() .zip(min_squares.names.iter()) .for_each(|(a, b)| assert!(a == b, "benchmark results not in the same order")); let names = median_slopes.names; let value_dists = min_squares.value_dists; let model = min_squares.model; - Some(Self { - base, - slopes, - names, - value_dists, - model, - }) + Some(Self { base, slopes, names, value_dists, model }) } } @@ -295,7 +316,7 @@ fn ms(mut nanos: u128) -> String { while x > 1 { if nanos > x * 1_000 { nanos = nanos / x * x; - break; + break } x /= 10; } @@ -306,19 +327,35 @@ impl std::fmt::Display for Analysis { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(ref value_dists) = self.value_dists { writeln!(f, "\nData points distribution:")?; - writeln!(f, "{} mean µs sigma µs %", self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" "))?; + writeln!( + f, + "{} mean µs sigma µs %", + self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" ") + )?; for (param_values, mean, sigma) in value_dists.iter() { if *mean == 0 { - writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", - param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), + writeln!( + f, + "{} {:>8} {:>8} {:>3}.{}%", + param_values + .iter() + .map(|v| format!("{:>5}", v)) + .collect::>() + .join(" "), ms(*mean), ms(*sigma), "?", "?" 
)?; } else { - writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", - param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), + writeln!( + f, + "{} {:>8} {:>8} {:>3}.{}%", + param_values + .iter() + .map(|v| format!("{:>5}", v)) + .collect::>() + .join(" "), ms(*mean), ms(*sigma), (sigma * 100 / mean), @@ -350,7 +387,7 @@ impl std::fmt::Debug for Analysis { for (&m, n) in self.slopes.iter().zip(self.names.iter()) { write!(f, " + ({} * {})", m, n)?; } - write!(f,"") + write!(f, "") } } @@ -382,17 +419,66 @@ mod tests { #[test] fn analysis_median_slopes_should_work() { let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), - benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), - benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + benchmark_result( + vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], + 11_500_000, + 0, + 3, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + 4, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + 5, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + 6, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + 5, + 2, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + 5, + 6, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + 5, + 14, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + 5, + 20, + ), ]; - let extrinsic_time = Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + let extrinsic_time = + Analysis::median_slopes(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); assert_eq!(extrinsic_time.base, 10_000_000); assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); @@ -408,17 +494,66 @@ mod tests { #[test] fn analysis_median_min_squares_should_work() { let data = vec![ - benchmark_result(vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0, 3, 10), - benchmark_result(vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0, 4, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0, 5, 10), - benchmark_result(vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0, 6, 10), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0, 5, 2), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0, 5, 6), - benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0, 5, 14), - 
benchmark_result(vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0, 5, 20), + benchmark_result( + vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], + 11_500_000, + 0, + 3, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + 4, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + 5, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + 6, + 10, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + 5, + 2, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + 5, + 6, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + 5, + 14, + ), + benchmark_result( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + 5, + 20, + ), ]; - let extrinsic_time = Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); + let extrinsic_time = + Analysis::min_squares_iqr(&data, BenchmarkSelector::ExtrinsicTime).unwrap(); assert_eq!(extrinsic_time.base, 10_000_000); assert_eq!(extrinsic_time.slopes, vec![1_000_000, 100_000]); diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index fb4fd0801a2453aaaadd7f44b6af03e6cb7d1f1b..ebf8a209860d6d32c8c184ea99a3b855378c028d 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -19,35 +19,35 @@ #![cfg_attr(not(feature = "std"), no_std)] -mod tests; -mod utils; #[cfg(feature = "std")] mod analysis; +mod tests; +mod utils; -pub use utils::*; #[cfg(feature = "std")] -pub use analysis::{Analysis, BenchmarkSelector, RegressionModel, AnalysisChoice}; -#[doc(hidden)] -pub use sp_io::storage::root as storage_root; -#[doc(hidden)] -pub use sp_runtime::traits::Zero; +pub use analysis::{Analysis, AnalysisChoice, BenchmarkSelector, RegressionModel}; #[doc(hidden)] pub use frame_support; #[doc(hidden)] -pub use sp_std::{self, vec, prelude::Vec, boxed::Box}; +pub use log; #[doc(hidden)] pub use paste; #[doc(hidden)] -pub use sp_storage::TrackedStorageKey; +pub use sp_io::storage::root as storage_root; #[doc(hidden)] -pub use log; +pub use sp_runtime::traits::Zero; +#[doc(hidden)] +pub use sp_std::{self, boxed::Box, prelude::Vec, vec}; +#[doc(hidden)] +pub use sp_storage::TrackedStorageKey; +pub use utils::*; /// Whitelist the given account. #[macro_export] macro_rules! whitelist { ($acc:ident) => { frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::Account::::hashed_key_for(&$acc).into() + frame_system::Account::::hashed_key_for(&$acc).into(), ); }; } @@ -1081,7 +1081,6 @@ macro_rules! impl_benchmark_test { /// /// - It must be the name of a method applied to the output of the `new_test_ext` argument. /// - That method must have a signature capable of receiving a single argument of the form `impl FnOnce()`. -/// // ## Notes (not for rustdoc) // // The biggest challenge for this macro is communicating the actual test functions to be run. We @@ -1260,9 +1259,9 @@ pub fn show_benchmark_debug_info( * Verify: {:?}\n\ * Error message: {}", sp_std::str::from_utf8(instance_string) - .expect("it's all just strings ran through the wasm interface. qed"), + .expect("it's all just strings ran through the wasm interface. 
qed"), sp_std::str::from_utf8(benchmark) - .expect("it's all just strings ran through the wasm interface. qed"), + .expect("it's all just strings ran through the wasm interface. qed"), lowest_range_values, highest_range_values, steps, diff --git a/substrate/frame/benchmarking/src/tests.rs b/substrate/frame/benchmarking/src/tests.rs index 646609c7c1e16f59c7212db6c7dea47453c41134..7bb1f9d7d62cbf99063a1be61380887ea03d7dfe 100644 --- a/substrate/frame/benchmarking/src/tests.rs +++ b/substrate/frame/benchmarking/src/tests.rs @@ -20,9 +20,13 @@ #![cfg(test)] use super::*; -use sp_std::prelude::*; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}, BuildStorage}; use frame_support::parameter_types; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, +}; +use sp_std::prelude::*; mod pallet_test { use frame_support::pallet_prelude::Get; @@ -59,7 +63,8 @@ mod pallet_test { } pub trait Config: frame_system::Config + OtherConfig - where Self::OtherEvent: Into<::Event> + where + Self::OtherEvent: Into<::Event>, { type Event; type LowerBound: Get; @@ -107,7 +112,7 @@ impl frame_system::Config for Test { type OnSetCode = (); } -parameter_types!{ +parameter_types! { pub const LowerBound: u32 = 1; pub const UpperBound: u32 = 100; } @@ -127,16 +132,20 @@ fn new_test_ext() -> sp_io::TestExternalities { } mod benchmarks { - use sp_std::prelude::*; + use super::{ + new_test_ext, + pallet_test::{self, Value}, + Test, + }; + use crate::{account, BenchmarkParameter, BenchmarkingSetup}; + use frame_support::{assert_err, assert_ok, ensure, traits::Get, StorageValue}; use frame_system::RawOrigin; - use super::{Test, pallet_test::{self, Value}, new_test_ext}; - use frame_support::{assert_ok, assert_err, ensure, traits::Get, StorageValue}; - use crate::{BenchmarkingSetup, BenchmarkParameter, account}; + use sp_std::prelude::*; // Additional used internally by the benchmark macro. use super::pallet_test::{Call, Config, Pallet}; - crate::benchmarks!{ + crate::benchmarks! { where_clause { where ::OtherEvent: Into<::Event> + Clone, @@ -204,7 +213,8 @@ mod benchmarks { &selected, &[(BenchmarkParameter::b, 1)], true, - ).expect("failed to create closure"); + ) + .expect("failed to create closure"); new_test_ext().execute_with(|| { assert_ok!(closure()); @@ -222,7 +232,8 @@ mod benchmarks { &selected, &[(BenchmarkParameter::b, 1)], true, - ).expect("failed to create closure"); + ) + .expect("failed to create closure"); new_test_ext().execute_with(|| { assert_ok!(closure()); @@ -240,7 +251,8 @@ mod benchmarks { &selected, &[(BenchmarkParameter::x, 1)], true, - ).expect("failed to create closure"); + ) + .expect("failed to create closure"); assert_ok!(closure()); } @@ -254,7 +266,8 @@ mod benchmarks { &selected, &[(BenchmarkParameter::b, 1)], true, - ).expect("failed to create closure"); + ) + .expect("failed to create closure"); new_test_ext().execute_with(|| { assert_ok!(closure()); @@ -267,7 +280,8 @@ mod benchmarks { &selected, &[(BenchmarkParameter::x, 10000)], true, - ).expect("failed to create closure"); + ) + .expect("failed to create closure"); new_test_ext().execute_with(|| { assert_err!(closure(), "You forgot to sort!"); diff --git a/substrate/frame/benchmarking/src/utils.rs b/substrate/frame/benchmarking/src/utils.rs index c40434fb1a5840124ef59e9e55ace09bb645f100..33d479a0b54a7586a61cf5708d65e0910b0f7d34 100644 --- a/substrate/frame/benchmarking/src/utils.rs +++ b/substrate/frame/benchmarking/src/utils.rs @@ -17,18 +17,43 @@ //! 
Interfaces, types and utils for benchmarking a FRAME runtime. -use codec::{Encode, Decode}; -use sp_std::{vec::Vec, prelude::Box}; +use codec::{Decode, Encode}; +use frame_support::traits::StorageInfo; use sp_io::hashing::blake2_256; +use sp_std::{prelude::Box, vec::Vec}; use sp_storage::TrackedStorageKey; -use frame_support::traits::StorageInfo; /// An alphabet of possible parameters to use for benchmarking. #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] pub enum BenchmarkParameter { - a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, + a, + b, + c, + d, + e, + f, + g, + h, + i, + j, + k, + l, + m, + n, + o, + p, + q, + r, + s, + t, + u, + v, + w, + x, + y, + z, } #[cfg(feature = "std")] @@ -105,7 +130,8 @@ pub trait Benchmarking { /// WARNING! This is a non-deterministic call. Do not use this within /// consensus critical logic. fn current_time() -> u128 { - std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH) + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) .expect("Unix time doesn't go backwards; qed") .as_nanos() } @@ -153,7 +179,7 @@ pub trait Benchmarking { // If the key does not exist, add it. None => { whitelist.push(add); - } + }, } self.set_whitelist(whitelist); } @@ -217,12 +243,16 @@ pub trait BenchmarkingSetup { fn instance( &self, components: &[(BenchmarkParameter, u32)], - verify: bool + verify: bool, ) -> Result Result<(), &'static str>>, &'static str>; } /// Grab an account, seeded by a name and index. -pub fn account(name: &'static str, index: u32, seed: u32) -> AccountId { +pub fn account( + name: &'static str, + index: u32, + seed: u32, +) -> AccountId { let entropy = (name, index, seed).using_encoded(blake2_256); AccountId::decode(&mut &entropy[..]).unwrap_or_default() } @@ -236,7 +266,7 @@ pub fn whitelisted_caller() -> AccountId { macro_rules! whitelist_account { ($acc:ident) => { frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::Account::::hashed_key_for(&$acc).into() + frame_system::Account::::hashed_key_for(&$acc).into(), ); - } + }; } diff --git a/substrate/frame/bounties/src/benchmarking.rs b/substrate/frame/bounties/src/benchmarking.rs index 23542e6c31b80b68e0b816c280404c4ff90b6965..c95c13649b6a42d3549aa935ec558f6b0131888a 100644 --- a/substrate/frame/bounties/src/benchmarking.rs +++ b/substrate/frame/bounties/src/benchmarking.rs @@ -21,10 +21,10 @@ use super::*; -use sp_runtime::traits::Bounded; -use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_support::traits::OnInitialize; +use frame_system::RawOrigin; +use sp_runtime::traits::Bounded; use crate::Module as Bounties; use pallet_treasury::Pallet as Treasury; @@ -33,7 +33,7 @@ const SEED: u32 = 0; // Create bounties that are approved for use in `on_initialize`. fn create_approved_bounties(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { + for i in 0..n { let (caller, _curator, _fee, value, reason) = setup_bounty::(i, MAX_BYTES); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; let bounty_id = BountyCount::get() - 1; @@ -44,13 +44,10 @@ fn create_approved_bounties(n: u32) -> Result<(), &'static str> { } // Create the pre-requisite information needed to create a treasury `propose_bounty`. 
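Aside: the `account` helper in the utils.rs hunk above is what the benchmark fixtures in these files build on (the `setup_bounty` function that follows calls it for both its caller and curator). Below is a minimal, self-contained sketch of the same derivation; `demo_account` is an illustrative name only, and it fixes the generic `AccountId: Decode` to a concrete 32-byte `AccountId32` for the sake of a runnable example.

use codec::Encode;
use sp_core::crypto::AccountId32;
use sp_io::hashing::blake2_256;

// SCALE-encode the (name, index, seed) triple, blake2_256-hash it, and use the
// 32-byte digest as the account id: equal inputs always yield the same account.
fn demo_account(name: &'static str, index: u32, seed: u32) -> AccountId32 {
    let entropy = (name, index, seed).using_encoded(blake2_256);
    AccountId32::new(entropy)
}

#[test]
fn demo_account_is_deterministic() {
    assert_eq!(demo_account("caller", 0, 0), demo_account("caller", 0, 0));
    assert_ne!(demo_account("caller", 0, 0), demo_account("curator", 0, 0));
}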
-fn setup_bounty(u: u32, d: u32) -> ( - T::AccountId, - T::AccountId, - BalanceOf, - BalanceOf, - Vec, -) { +fn setup_bounty( + u: u32, + d: u32, +) -> (T::AccountId, T::AccountId, BalanceOf, BalanceOf, Vec) { let caller = account("caller", u, SEED); let value: BalanceOf = T::BountyValueMinimum::get().saturating_mul(100u32.into()); let fee = value / 2u32.into(); @@ -62,10 +59,8 @@ fn setup_bounty(u: u32, d: u32) -> ( (caller, curator, fee, value, reason) } -fn create_bounty() -> Result<( - ::Source, - BountyIndex, -), &'static str> { +fn create_bounty( +) -> Result<(::Source, BountyIndex), &'static str> { let (caller, curator, fee, value, reason) = setup_bounty::(0, MAX_BYTES); let curator_lookup = T::Lookup::unlookup(curator.clone()); Bounties::::propose_bounty(RawOrigin::Signed(caller).into(), value, reason)?; @@ -216,8 +211,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - Bounties, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/bounties/src/lib.rs b/substrate/frame/bounties/src/lib.rs index 419713ab5eff505ab02282eaed1326d369c73ec8..4700b1d34d810ea4109969df15d2195ba46bfc20 100644 --- a/substrate/frame/bounties/src/lib.rs +++ b/substrate/frame/bounties/src/lib.rs @@ -74,28 +74,28 @@ #![cfg_attr(not(feature = "std"), no_std)] -mod tests; mod benchmarking; +mod tests; pub mod weights; use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{AllowDeath}, - ReservableCurrency}; + Currency, ExistenceRequirement::AllowDeath, Get, Imbalance, OnUnbalanced, ReservableCurrency, +}; -use sp_runtime::{Permill, RuntimeDebug, DispatchResult, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating, BadOrigin -}}; +use sp_runtime::{ + traits::{AccountIdConversion, BadOrigin, Saturating, StaticLookup, Zero}, + DispatchResult, Permill, RuntimeDebug, +}; -use frame_support::dispatch::DispatchResultWithPostInfo; -use frame_support::traits::{EnsureOrigin}; +use frame_support::{dispatch::DispatchResultWithPostInfo, traits::EnsureOrigin}; -use frame_support::weights::{Weight}; +use frame_support::weights::Weight; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_system::{self as system, ensure_signed}; pub use weights::WeightInfo; @@ -104,7 +104,6 @@ type BalanceOf = pallet_treasury::BalanceOf; type PositiveImbalanceOf = pallet_treasury::PositiveImbalanceOf; pub trait Config: frame_system::Config + pallet_treasury::Config { - /// The amount held on deposit for placing a bounty proposal. 
type BountyDepositBase: Get>; @@ -692,14 +691,17 @@ impl Module { description: Vec, value: BalanceOf, ) -> DispatchResult { - ensure!(description.len() <= T::MaximumReasonLength::get() as usize, Error::::ReasonTooBig); + ensure!( + description.len() <= T::MaximumReasonLength::get() as usize, + Error::::ReasonTooBig + ); ensure!(value >= T::BountyValueMinimum::get(), Error::::InvalidValue); let index = Self::bounty_count(); // reserve deposit for new bounty - let bond = T::BountyDepositBase::get() - + T::DataDepositPerByte::get() * (description.len() as u32).into(); + let bond = T::BountyDepositBase::get() + + T::DataDepositPerByte::get() * (description.len() as u32).into(); T::Currency::reserve(&proposer, bond) .map_err(|_| Error::::InsufficientProposersBalance)?; @@ -721,7 +723,6 @@ impl Module { Ok(()) } - } impl pallet_treasury::SpendFunds for Module { @@ -729,7 +730,7 @@ impl pallet_treasury::SpendFunds for Module { budget_remaining: &mut BalanceOf, imbalance: &mut PositiveImbalanceOf, total_weight: &mut Weight, - missed_any: &mut bool + missed_any: &mut bool, ) { let bounties_len = BountyApprovals::mutate(|v| { let bounties_approval_len = v.len() as u32; @@ -747,7 +748,10 @@ impl pallet_treasury::SpendFunds for Module { debug_assert!(err_amount.is_zero()); // fund the bounty account - imbalance.subsume(T::Currency::deposit_creating(&Self::bounty_account_id(index), bounty.value)); + imbalance.subsume(T::Currency::deposit_creating( + &Self::bounty_account_id(index), + bounty.value, + )); Self::deposit_event(RawEvent::BountyBecameActive(index)); false diff --git a/substrate/frame/bounties/src/tests.rs b/substrate/frame/bounties/src/tests.rs index 2e96d8271e1308170c30fa2f483d3ab0d196533c..5ce1373ed90660da8484bf05d82c7fe6453888e9 100644 --- a/substrate/frame/bounties/src/tests.rs +++ b/substrate/frame/bounties/src/tests.rs @@ -19,20 +19,20 @@ #![cfg(test)] -use crate as pallet_bounties; use super::*; +use crate as pallet_bounties; use std::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, parameter_types, weights::Weight, traits::OnInitialize, - PalletId, pallet_prelude::GenesisBuild, + assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, traits::OnInitialize, + weights::Weight, PalletId, }; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -121,7 +121,7 @@ impl pallet_treasury::Config for Test { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BurnDestination = (); // Just gets burned. + type BurnDestination = (); // Just gets burned. type WeightInfo = (); type SpendFunds = Bounties; type MaxApprovals = MaxApprovals; @@ -146,23 +146,25 @@ impl Config for Test { type WeightInfo = (); } -type TreasuryError = pallet_treasury::Error::; +type TreasuryError = pallet_treasury::Error; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. 
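Aside: the bond computed in the propose path above, `bond = BountyDepositBase + DataDepositPerByte * (description.len() as u32)`, is exactly what the `deposit = 80 + 5` and `bond: 85` assertions in the tests below encode. A worked check, with the base and per-byte values inferred from those assertions rather than quoted from the mock runtime:

// Assumed test constants: BountyDepositBase = 80, DataDepositPerByte = 1.
let base: u64 = 80;
let per_byte: u64 = 1;
let description = b"12345"; // the 5-byte reason used by several of these tests
let bond = base + per_byte * description.len() as u64;
assert_eq!(bond, 85); // matches the `bond: 85` fields asserted below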
balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); t.into() } fn last_event() -> RawEvent { - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::Bounties(inner) = e { Some(inner) } else { None } - }) + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::Bounties(inner) = e { Some(inner) } else { None }) .last() .unwrap() } @@ -267,8 +269,10 @@ fn reject_already_rejected_spend_proposal_fails() { #[test] fn reject_non_existent_spend_proposal_fails() { new_test_ext().execute_with(|| { - assert_noop!(Treasury::reject_proposal(Origin::root(), 0), - pallet_treasury::Error::::InvalidIndex); + assert_noop!( + Treasury::reject_proposal(Origin::root(), 0), + pallet_treasury::Error::::InvalidIndex + ); }); } @@ -353,9 +357,9 @@ fn treasury_account_doesnt_get_deleted() { #[test] fn inexistent_account_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } + .assimilate_storage(&mut t) + .unwrap(); // Treasury genesis config is not build thus treasury account does not exist let mut t: sp_io::TestExternalities = t.into(); @@ -398,14 +402,17 @@ fn propose_bounty_works() { assert_eq!(Balances::reserved_balance(0), deposit); assert_eq!(Balances::free_balance(0), 100 - deposit); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 10, - bond: deposit, - status: BountyStatus::Proposed, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 10, + bond: deposit, + status: BountyStatus::Proposed, + } + ); assert_eq!(Bounties::bounty_descriptions(0).unwrap(), b"1234567890".to_vec()); @@ -476,14 +483,17 @@ fn approve_bounty_works() { let deposit: u64 = 80 + 5; - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - value: 50, - curator_deposit: 0, - bond: deposit, - status: BountyStatus::Approved, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 0, + value: 50, + curator_deposit: 0, + bond: deposit, + status: BountyStatus::Approved, + } + ); assert_eq!(Bounties::bounty_approvals(), vec![0]); assert_noop!(Bounties::close_bounty(Origin::root(), 0), Error::::UnexpectedStatus); @@ -498,14 +508,17 @@ fn approve_bounty_works() { assert_eq!(Balances::reserved_balance(0), 0); assert_eq!(Balances::free_balance(0), 100); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: deposit, - status: BountyStatus::Funded, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: deposit, + status: BountyStatus::Funded, + } + ); assert_eq!(Treasury::pot(), 100 - 50 - 25); // burn 25 assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 50); @@ -518,7 +531,10 @@ fn assign_curator_works() { System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 4), Error::::InvalidIndex); + assert_noop!( + Bounties::propose_curator(Origin::root(), 0, 4, 4), + 
Error::::InvalidIndex + ); assert_ok!(Bounties::propose_bounty(Origin::signed(0), 50, b"12345".to_vec())); @@ -527,39 +543,46 @@ fn assign_curator_works() { System::set_block_number(2); >::on_initialize(2); - assert_noop!(Bounties::propose_curator(Origin::root(), 0, 4, 50), Error::::InvalidFee); + assert_noop!( + Bounties::propose_curator(Origin::root(), 0, 4, 50), + Error::::InvalidFee + ); assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::CuratorProposed { - curator: 4, - }, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::CuratorProposed { curator: 4 }, + } + ); assert_noop!(Bounties::accept_curator(Origin::signed(1), 0), Error::::RequireCurator); - assert_noop!(Bounties::accept_curator(Origin::signed(4), 0), pallet_balances::Error::::InsufficientBalance); + assert_noop!( + Bounties::accept_curator(Origin::signed(4), 0), + pallet_balances::Error::::InsufficientBalance + ); Balances::make_free_balance_be(&4, 10); assert_ok!(Bounties::accept_curator(Origin::signed(4), 0)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::Active { - curator: 4, - update_due: 22, - }, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 22 }, + } + ); assert_eq!(Balances::free_balance(&4), 8); assert_eq!(Balances::reserved_balance(&4), 2); @@ -584,14 +607,17 @@ fn unassign_curator_works() { assert_ok!(Bounties::unassign_curator(Origin::signed(4), 0)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); assert_ok!(Bounties::propose_curator(Origin::root(), 0, 4, 4)); @@ -601,21 +627,23 @@ fn unassign_curator_works() { assert_ok!(Bounties::unassign_curator(Origin::root(), 0)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 4, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); assert_eq!(Balances::free_balance(&4), 8); assert_eq!(Balances::reserved_balance(&4), 0); // slashed 2 }); } - #[test] fn award_and_claim_bounty_works() { new_test_ext().execute_with(|| { @@ -634,22 +662,24 @@ fn award_and_claim_bounty_works() { assert_eq!(Balances::free_balance(4), 8); // inital 10 - 2 deposit - assert_noop!(Bounties::award_bounty(Origin::signed(1), 0, 3), Error::::RequireCurator); + assert_noop!( + Bounties::award_bounty(Origin::signed(1), 0, 3), + Error::::RequireCurator + ); assert_ok!(Bounties::award_bounty(Origin::signed(4), 0, 3)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 4, - curator_deposit: 2, - value: 50, - bond: 85, - status: BountyStatus::PendingPayout { - curator: 4, - beneficiary: 3, - unlock_at: 5 - }, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), 
+ Bounty { + proposer: 0, + fee: 4, + curator_deposit: 2, + value: 50, + bond: 85, + status: BountyStatus::PendingPayout { curator: 4, beneficiary: 3, unlock_at: 5 }, + } + ); assert_noop!(Bounties::claim_bounty(Origin::signed(1), 0), Error::::Premature); @@ -713,7 +743,6 @@ fn claim_handles_high_fee() { #[test] fn cancel_and_refund() { new_test_ext().execute_with(|| { - System::set_block_number(1); Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -727,14 +756,17 @@ fn cancel_and_refund() { assert_ok!(Balances::transfer(Origin::signed(0), Bounties::bounty_account_id(0), 10)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 0, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 0, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); assert_eq!(Balances::free_balance(Bounties::bounty_account_id(0)), 60); @@ -743,9 +775,7 @@ fn cancel_and_refund() { assert_ok!(Bounties::close_bounty(Origin::root(), 0)); assert_eq!(Treasury::pot(), 85); // - 25 + 10 - }); - } #[test] @@ -816,18 +846,20 @@ fn expire_and_unassign() { assert_ok!(Bounties::unassign_curator(Origin::signed(0), 0)); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 0, - value: 50, - bond: 85, - status: BountyStatus::Funded, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 10, + curator_deposit: 0, + value: 50, + bond: 85, + status: BountyStatus::Funded, + } + ); assert_eq!(Balances::free_balance(1), 93); assert_eq!(Balances::reserved_balance(1), 0); // slashed - }); } @@ -841,7 +873,10 @@ fn extend_expiry() { assert_ok!(Bounties::approve_bounty(Origin::root(), 0)); - assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), Error::::UnexpectedStatus); + assert_noop!( + Bounties::extend_bounty_expiry(Origin::signed(1), 0, Vec::new()), + Error::::UnexpectedStatus + ); System::set_block_number(2); >::on_initialize(2); @@ -855,28 +890,37 @@ fn extend_expiry() { System::set_block_number(10); >::on_initialize(10); - assert_noop!(Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), Error::::RequireCurator); + assert_noop!( + Bounties::extend_bounty_expiry(Origin::signed(0), 0, Vec::new()), + Error::::RequireCurator + ); assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, + } + ); assert_ok!(Bounties::extend_bounty_expiry(Origin::signed(4), 0, Vec::new())); - assert_eq!(Bounties::bounties(0).unwrap(), Bounty { - proposer: 0, - fee: 10, - curator_deposit: 5, - value: 50, - bond: 85, - status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same - }); + assert_eq!( + Bounties::bounties(0).unwrap(), + Bounty { + proposer: 0, + fee: 10, + curator_deposit: 5, + value: 50, + bond: 85, + status: BountyStatus::Active { curator: 4, update_due: 30 }, // still the same + } + ); System::set_block_number(25); >::on_initialize(25); @@ -893,10 +937,12 @@ fn extend_expiry() { fn genesis_funding_works() { let mut t = 
frame_system::GenesisConfig::default().build_storage::().unwrap(); let initial_funding = 100; - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); diff --git a/substrate/frame/bounties/src/weights.rs b/substrate/frame/bounties/src/weights.rs index 9b50d438923c2fb21a6d53f139f1ded0e68554a0..2f982490bd44a7705bda9e0df6f1adb7291f1db0 100644 --- a/substrate/frame/bounties/src/weights.rs +++ b/substrate/frame/bounties/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/collective/src/benchmarking.rs b/substrate/frame/collective/src/benchmarking.rs index 7faaa31dc801228fa3b573c4cefdeecc791a48d2..2862c830959c9df2ba3b992cf7747de4eba1e6f7 100644 --- a/substrate/frame/collective/src/benchmarking.rs +++ b/substrate/frame/collective/src/benchmarking.rs @@ -19,19 +19,15 @@ use super::*; -use frame_system::RawOrigin as SystemOrigin; use frame_benchmarking::{ - benchmarks_instance, - account, - whitelisted_caller, - impl_benchmark_test_suite, + account, benchmarks_instance, impl_benchmark_test_suite, whitelisted_caller, }; +use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; -use frame_system::Call as SystemCall; -use frame_system::Pallet as System; use crate::Module as Collective; +use frame_system::{Call as SystemCall, Pallet as System}; const SEED: u32 = 0; @@ -639,8 +635,4 @@ benchmarks_instance! 
{ } } -impl_benchmark_test_suite!( - Collective, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/collective/src/lib.rs b/substrate/frame/collective/src/lib.rs index a6e44b96feaa48d20ddb3b21e79dad8a46d46ece..0747e4e9ade0f497c696fba6ea924a65c901d119 100644 --- a/substrate/frame/collective/src/lib.rs +++ b/substrate/frame/collective/src/lib.rs @@ -42,22 +42,24 @@ #![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit = "128"] -use sp_std::{prelude::*, result}; use sp_core::u32_trait::Value as U32; use sp_io::storage; -use sp_runtime::{RuntimeDebug, traits::Hash}; +use sp_runtime::{traits::Hash, RuntimeDebug}; +use sp_std::{prelude::*, result}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, ensure, BoundedVec, codec::{Decode, Encode}, + decl_error, decl_event, decl_module, decl_storage, dispatch::{ DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, Parameter, PostDispatchInfo, }, - traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers, GetBacking, Backing}, - weights::{DispatchClass, GetDispatchInfo, Weight, Pays}, + ensure, + traits::{Backing, ChangeMembers, EnsureOrigin, Get, GetBacking, InitializeMembers}, + weights::{DispatchClass, GetDispatchInfo, Pays, Weight}, + BoundedVec, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -120,13 +122,13 @@ impl DefaultVote for MoreThanMajorityThenPrimeDefaultVote { } } -pub trait Config: frame_system::Config { +pub trait Config: frame_system::Config { /// The outer origin type. type Origin: From>; /// The outer call dispatch type. type Proposal: Parameter - + Dispatchable>::Origin, PostInfo=PostDispatchInfo> + + Dispatchable>::Origin, PostInfo = PostDispatchInfo> + From> + GetDispatchInfo; @@ -174,7 +176,7 @@ impl GetBacking for RawOrigin { } /// Origin for the collective module. -pub type Origin = RawOrigin<::AccountId, I>; +pub type Origin = RawOrigin<::AccountId, I>; #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] /// Info for keeping track of a motion being voted on. @@ -287,7 +289,6 @@ fn get_result_weight(result: DispatchResultWithPostInfo) -> Option { } } - // Note that councillor operations are assigned to the operational class. decl_module! 
{ pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { @@ -734,12 +735,12 @@ impl, I: Instance> Module { fn validate_and_get_proposal( hash: &T::Hash, length_bound: u32, - weight_bound: Weight + weight_bound: Weight, ) -> Result<(>::Proposal, usize), DispatchError> { let key = ProposalOf::::hashed_key_for(hash); // read the length of the proposal storage entry directly - let proposal_len = storage::read(&key, &mut [0; 0], 0) - .ok_or(Error::::ProposalMissing)?; + let proposal_len = + storage::read(&key, &mut [0; 0], 0).ok_or(Error::::ProposalMissing)?; ensure!(proposal_len <= length_bound, Error::::WrongProposalLength); let proposal = ProposalOf::::get(hash).ok_or(Error::::ProposalMissing)?; let proposal_weight = proposal.get_dispatch_info().weight; @@ -772,9 +773,10 @@ impl, I: Instance> Module { let dispatch_weight = proposal.get_dispatch_info().weight; let origin = RawOrigin::Members(voting.threshold, seats).into(); let result = proposal.dispatch(origin); - Self::deposit_event( - RawEvent::Executed(proposal_hash, result.map(|_| ()).map_err(|e| e.error)) - ); + Self::deposit_event(RawEvent::Executed( + proposal_hash, + result.map(|_| ()).map_err(|e| e.error), + )); // default to the dispatch info weight for safety let proposal_weight = get_result_weight(result).unwrap_or(dispatch_weight); // P1 @@ -836,17 +838,21 @@ impl, I: Instance> ChangeMembers for Module { let mut outgoing = outgoing.to_vec(); outgoing.sort(); for h in Self::proposals().into_iter() { - >::mutate(h, |v| + >::mutate(h, |v| { if let Some(mut votes) = v.take() { - votes.ayes = votes.ayes.into_iter() + votes.ayes = votes + .ayes + .into_iter() .filter(|i| outgoing.binary_search(i).is_err()) .collect(); - votes.nays = votes.nays.into_iter() + votes.nays = votes + .nays + .into_iter() .filter(|i| outgoing.binary_search(i).is_err()) .collect(); *v = Some(votes); } - ); + }); } Members::::put(new); Prime::::kill(); @@ -872,10 +878,12 @@ impl, I: Instance> InitializeMembers for Module /// Ensure that the origin `o` represents at least `n` members. Returns `Ok` or an `Err` /// otherwise. 
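Aside: the `change_members_sorted` body above prunes the votes of outgoing members via `binary_search`, which is only sound because `outgoing` is sorted first; the ayes and nays then retain exactly the voters for whom the search returns `Err`, i.e. those not leaving. A standalone sketch of that pruning step:

let mut outgoing = vec![4u64, 1];
outgoing.sort(); // required: binary_search assumes a sorted slice
let ayes = vec![1u64, 2, 3];
let kept: Vec<u64> =
    ayes.into_iter().filter(|i| outgoing.binary_search(i).is_err()).collect();
assert_eq!(kept, vec![2, 3]); // member 1 left the collective, so their aye is dropped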
-pub fn ensure_members(o: OuterOrigin, n: MemberCount) - -> result::Result +pub fn ensure_members( + o: OuterOrigin, + n: MemberCount, +) -> result::Result where - OuterOrigin: Into, OuterOrigin>> + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::Members(x, _)) if x >= n => Ok(n), @@ -883,12 +891,15 @@ where } } -pub struct EnsureMember(sp_std::marker::PhantomData<(AccountId, I)>); +pub struct EnsureMember( + sp_std::marker::PhantomData<(AccountId, I)>, +); impl< - O: Into, O>> + From>, - AccountId: Default, - I, -> EnsureOrigin for EnsureMember { + O: Into, O>> + From>, + AccountId: Default, + I, + > EnsureOrigin for EnsureMember +{ type Success = AccountId; fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -903,13 +914,16 @@ impl< } } -pub struct EnsureMembers(sp_std::marker::PhantomData<(N, AccountId, I)>); +pub struct EnsureMembers( + sp_std::marker::PhantomData<(N, AccountId, I)>, +); impl< - O: Into, O>> + From>, - N: U32, - AccountId, - I, -> EnsureOrigin for EnsureMembers { + O: Into, O>> + From>, + N: U32, + AccountId, + I, + > EnsureOrigin for EnsureMembers +{ type Success = (MemberCount, MemberCount); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -924,16 +938,17 @@ impl< } } -pub struct EnsureProportionMoreThan( - sp_std::marker::PhantomData<(N, D, AccountId, I)> +pub struct EnsureProportionMoreThan( + sp_std::marker::PhantomData<(N, D, AccountId, I)>, ); impl< - O: Into, O>> + From>, - N: U32, - D: U32, - AccountId, - I, -> EnsureOrigin for EnsureProportionMoreThan { + O: Into, O>> + From>, + N: U32, + D: U32, + AccountId, + I, + > EnsureOrigin for EnsureProportionMoreThan +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -948,16 +963,17 @@ impl< } } -pub struct EnsureProportionAtLeast( - sp_std::marker::PhantomData<(N, D, AccountId, I)> +pub struct EnsureProportionAtLeast( + sp_std::marker::PhantomData<(N, D, AccountId, I)>, ); impl< - O: Into, O>> + From>, - N: U32, - D: U32, - AccountId, - I, -> EnsureOrigin for EnsureProportionAtLeast { + O: Into, O>> + From>, + N: U32, + D: U32, + AccountId, + I, + > EnsureOrigin for EnsureProportionAtLeast +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -975,15 +991,16 @@ impl< #[cfg(test)] mod tests { use super::*; - use frame_support::{Hashable, assert_ok, assert_noop, parameter_types}; + use crate as collective; + use frame_support::{assert_noop, assert_ok, parameter_types, Hashable}; use frame_system::{self as system, EventRecord, Phase}; use hex_literal::hex; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, BuildStorage, }; - use crate as collective; parameter_types! 
{ pub const BlockHashCount: u64 = 250; @@ -1076,7 +1093,10 @@ mod tests { phantom: Default::default(), }, default_collective: Default::default(), - }.build_storage().unwrap().into(); + } + .build_storage() + .unwrap() + .into(); ext.execute_with(|| System::set_block_number(1)); ext } @@ -1101,65 +1121,114 @@ mod tests { let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); System::set_block_number(3); assert_noop!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len), + Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight, + proposal_len + ), Error::::TooEarly ); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), - record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::Collective(RawEvent::Disapproved(hash.clone()))) - ]); + assert_ok!(Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); + + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!( + System::events(), + vec![ + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))) + ] + ); }); } #[test] fn proposal_weight_limit_works_on_approve() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); + let proposal = + Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); // Set 1 as prime voter Prime::::set(Some(1)); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); // With 1's prime vote, this should pass System::set_block_number(4); assert_noop!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight - 100, proposal_len), + Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight - 100, + proposal_len + ), Error::::WrongProposalWeight ); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + Origin::signed(4), + hash.clone(), + 
0, + proposal_weight, + proposal_len + )); }) } #[test] fn proposal_weight_limit_ignored_on_disapprove() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); + let proposal = + Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); // No votes, this proposal wont pass System::set_block_number(4); - assert_ok!( - Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight - 100, proposal_len) - ); + assert_ok!(Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight - 100, + proposal_len + )); }) } @@ -1170,23 +1239,43 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(3), MaxMembers::get())); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::set_members( + Origin::root(), + vec![1, 2, 3], + Some(3), + MaxMembers::get() + )); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), - record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::Collective(RawEvent::Disapproved(hash.clone()))) - ]); + assert_ok!(Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); + + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!( + System::events(), + vec![ + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))) + ] + ); }); } @@ -1197,24 +1286,47 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::root(), vec![1, 2, 3], Some(1), MaxMembers::get())); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::set_members( + Origin::root(), + vec![1, 2, 3], + Some(1), + MaxMembers::get() + 
)); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), - record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::Collective(RawEvent::Closed(hash.clone(), 3, 0))), - record(Event::Collective(RawEvent::Approved(hash.clone()))), - record(Event::Collective(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) - ]); + assert_ok!(Collective::close( + Origin::signed(4), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); + + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!( + System::events(), + vec![ + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Closed(hash.clone(), 3, 0))), + record(Event::Collective(RawEvent::Approved(hash.clone()))), + record(Event::Collective(RawEvent::Executed( + hash.clone(), + Err(DispatchError::BadOrigin) + ))) + ] + ); }); } @@ -1225,26 +1337,49 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(CollectiveMajority::set_members(Origin::root(), vec![1, 2, 3, 4, 5], Some(5), MaxMembers::get())); - - assert_ok!(CollectiveMajority::propose(Origin::signed(1), 5, Box::new(proposal.clone()), proposal_len)); + assert_ok!(CollectiveMajority::set_members( + Origin::root(), + vec![1, 2, 3, 4, 5], + Some(5), + MaxMembers::get() + )); + + assert_ok!(CollectiveMajority::propose( + Origin::signed(1), + 5, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(CollectiveMajority::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(CollectiveMajority::vote(Origin::signed(2), hash.clone(), 0, true)); assert_ok!(CollectiveMajority::vote(Origin::signed(3), hash.clone(), 0, true)); System::set_block_number(4); - assert_ok!(CollectiveMajority::close(Origin::signed(4), hash.clone(), 0, proposal_weight, proposal_len)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::CollectiveMajority(RawEvent::Proposed(1, 0, hash.clone(), 5))), - record(Event::CollectiveMajority(RawEvent::Voted(1, hash.clone(), true, 1, 0))), - record(Event::CollectiveMajority(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::CollectiveMajority(RawEvent::Voted(3, hash.clone(), true, 3, 0))), - record(Event::CollectiveMajority(RawEvent::Closed(hash.clone(), 5, 0))), - record(Event::CollectiveMajority(RawEvent::Approved(hash.clone()))), - record(Event::CollectiveMajority(RawEvent::Executed(hash.clone(), Err(DispatchError::BadOrigin)))) - ]); + assert_ok!(CollectiveMajority::close( + Origin::signed(4), + hash.clone(), + 0, + 
proposal_weight, + proposal_len + )); + + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!( + System::events(), + vec![ + record(Event::CollectiveMajority(RawEvent::Proposed(1, 0, hash.clone(), 5))), + record(Event::CollectiveMajority(RawEvent::Voted(1, hash.clone(), true, 1, 0))), + record(Event::CollectiveMajority(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::CollectiveMajority(RawEvent::Voted(3, hash.clone(), true, 3, 0))), + record(Event::CollectiveMajority(RawEvent::Closed(hash.clone(), 5, 0))), + record(Event::CollectiveMajority(RawEvent::Approved(hash.clone()))), + record(Event::CollectiveMajority(RawEvent::Executed( + hash.clone(), + Err(DispatchError::BadOrigin) + ))) + ] + ); }); } @@ -1255,7 +1390,12 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); assert_eq!( @@ -1271,7 +1411,12 @@ mod tests { let proposal = make_proposal(69); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(2), + 2, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 1, true)); assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); assert_eq!( @@ -1293,14 +1438,24 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 3, 4], None, MaxMembers::get())); + assert_ok!(Collective::set_members( + Origin::root(), + vec![2, 3, 4], + None, + MaxMembers::get() + )); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) @@ -1309,14 +1464,24 @@ mod tests { let proposal = make_proposal(69); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(2), + 2, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 1, true)); assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); - assert_ok!(Collective::set_members(Origin::root(), vec![2, 4], None, MaxMembers::get())); + 
assert_ok!(Collective::set_members( + Origin::root(), + vec![2, 4], + None, + MaxMembers::get() + )); assert_eq!( Collective::voting(&hash), Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) @@ -1331,7 +1496,12 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash = proposal.blake2_256().into(); let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_eq!(*Collective::proposals(), vec![hash]); assert_eq!(Collective::proposal_of(&hash), Some(proposal)); assert_eq!( @@ -1339,25 +1509,27 @@ mod tests { Some(Votes { index: 0, threshold: 3, ayes: vec![], nays: vec![], end }) ); - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Initialization, event: Event::Collective(RawEvent::Proposed( 1, 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), + hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"] + .into(), 3, )), topics: vec![], - } - ]); + }] + ); }); } #[test] fn limit_active_proposals() { new_test_ext().execute_with(|| { - for i in 0 .. MaxProposals::get() { + for i in 0..MaxProposals::get() { let proposal = make_proposal(i as u64); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_ok!(Collective::propose( @@ -1379,14 +1551,24 @@ mod tests { #[test] fn correct_validate_and_get_proposal() { new_test_ext().execute_with(|| { - let proposal = Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); + let proposal = + Call::Collective(crate::Call::set_members(vec![1, 2, 3], None, MaxMembers::get())); let length = proposal.encode().len() as u32; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), length)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + length + )); let hash = BlakeTwo256::hash_of(&proposal); let weight = proposal.get_dispatch_info().weight; assert_noop!( - Collective::validate_and_get_proposal(&BlakeTwo256::hash_of(&vec![3; 4]), length, weight), + Collective::validate_and_get_proposal( + &BlakeTwo256::hash_of(&vec![3; 4]), + length, + weight + ), Error::::ProposalMissing ); assert_noop!( @@ -1411,7 +1593,12 @@ mod tests { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); assert_noop!( - Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone()), proposal_len), + Collective::propose( + Origin::signed(42), + 3, + Box::new(proposal.clone()), + proposal_len + ), Error::::NotMember ); }); @@ -1423,7 +1610,12 @@ mod tests { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_noop!( Collective::vote(Origin::signed(42), hash.clone(), 0, true), Error::::NotMember, @@ -1438,7 +1630,12 @@ mod tests { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); 
+ assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_noop!( Collective::vote(Origin::signed(2), hash.clone(), 1, true), Error::::WrongIndex, @@ -1453,7 +1650,12 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); // Initially there a no votes when the motion is proposed. assert_eq!( Collective::voting(&hash), @@ -1482,41 +1684,52 @@ mod tests { Error::::DuplicateVote, ); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"] + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Proposed( + 1, + 0, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] .into(), - true, - 1, - 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 0, - 1, - )), - topics: vec![], - } - ]); + 2, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 1, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + true, + 1, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 1, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + false, + 0, + 1, + )), + topics: vec![], + } + ] + ); }); } @@ -1527,62 +1740,40 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); let end = 4; - assert_ok!( - Collective::propose( - Origin::signed(1), - 2, - Box::new(proposal.clone()), - proposal_len, - ) - ); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len, + )); assert_eq!( Collective::voting(&hash), Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![], end }) ); // For the motion, acc 2's first vote, expecting Ok with Pays::No. - let vote_rval: DispatchResultWithPostInfo = Collective::vote( - Origin::signed(2), - hash.clone(), - 0, - true, - ); + let vote_rval: DispatchResultWithPostInfo = + Collective::vote(Origin::signed(2), hash.clone(), 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // Duplicate vote, expecting error with Pays::Yes. - let vote_rval: DispatchResultWithPostInfo = Collective::vote( - Origin::signed(2), - hash.clone(), - 0, - true, - ); + let vote_rval: DispatchResultWithPostInfo = + Collective::vote(Origin::signed(2), hash.clone(), 0, true); assert_eq!(vote_rval.unwrap_err().post_info.pays_fee, Pays::Yes); // Modifying vote, expecting ok with Pays::Yes. 
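Aside: the `Pays::No`/`Pays::Yes` assertions in this test encode a simple rule: a member's first vote on a given motion is fee-free, and any later duplicate or changed vote is charged. A minimal model of just that rule, written against plain `std` rather than the pallet itself:

use std::collections::HashSet;

// `insert` returns false when (who, motion) was already present,
// i.e. on every vote after the first, and that is when a fee is due.
fn must_pay(seen: &mut HashSet<(u64, u32)>, who: u64, motion: u32) -> bool {
    !seen.insert((who, motion))
}

let mut seen = HashSet::new();
assert!(!must_pay(&mut seen, 2, 0)); // acc 2's first vote: Pays::No
assert!(must_pay(&mut seen, 2, 0)); // acc 2 votes again: Pays::Yes
assert!(!must_pay(&mut seen, 3, 0)); // acc 3's first vote is free again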
- let vote_rval: DispatchResultWithPostInfo = Collective::vote( - Origin::signed(2), - hash.clone(), - 0, - false, - ); + let vote_rval: DispatchResultWithPostInfo = + Collective::vote(Origin::signed(2), hash.clone(), 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // For the motion, acc 3's first vote, expecting Ok with Pays::No. - let vote_rval: DispatchResultWithPostInfo = Collective::vote( - Origin::signed(3), - hash.clone(), - 0, - true, - ); + let vote_rval: DispatchResultWithPostInfo = + Collective::vote(Origin::signed(3), hash.clone(), 0, true); assert_eq!(vote_rval.unwrap().pays_fee, Pays::No); // acc 3 modify the vote, expecting Ok with Pays::Yes. - let vote_rval: DispatchResultWithPostInfo = Collective::vote( - Origin::signed(3), - hash.clone(), - 0, - false, - ); + let vote_rval: DispatchResultWithPostInfo = + Collective::vote(Origin::signed(3), hash.clone(), 0, false); assert_eq!(vote_rval.unwrap().pays_fee, Pays::Yes); // Test close() Extrincis | Check DispatchResultWithPostInfo with Pay Info @@ -1617,11 +1808,27 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); assert_eq!(*Collective::proposals(), vec![]); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); assert_eq!(*Collective::proposals(), vec![hash]); }); } @@ -1633,60 +1840,90 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::Collective( - RawEvent::Proposed( + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Proposed( 1, 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), 3, )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - true, - 1, - 0, - )), - topics: vec![], - }, - 
EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 1, - 1, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Closed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 1, 1, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Disapproved( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - } - ]); + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 1, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + true, + 1, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 2, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + false, + 1, + 1, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Closed( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 1, + 1, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Disapproved( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + )), + topics: vec![], + } + ] + ); }); } @@ -1697,85 +1934,134 @@ mod tests { let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - true, - 1, - 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - true, - 2, - 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Closed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), 2, 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Collective(RawEvent::Approved( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - }, - EventRecord { - phase: 
Phase::Initialization, - event: Event::Collective(RawEvent::Executed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - Err(DispatchError::BadOrigin), - )), - topics: vec![], - } - ]); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Proposed( + 1, + 0, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 2, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 1, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + true, + 1, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Voted( + 2, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + true, + 2, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Closed( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 2, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Approved( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::Collective(RawEvent::Executed( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + Err(DispatchError::BadOrigin), + )), + topics: vec![], + } + ] + ); }); } #[test] fn motion_with_no_votes_closes_with_disapproval() { new_test_ext().execute_with(|| { - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let proposal_weight = proposal.get_dispatch_info().weight; let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_eq!(System::events()[0], record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3)))); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()), + proposal_len + )); + assert_eq!( + System::events()[0], + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 3))) + ); // Closing the motion too early is not possible because it has neither // an approving nor a disapproving simple majority due to the lack of votes. assert_noop!( - Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len), + Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len + ), Error::::TooEarly ); @@ -1783,13 +2069,24 @@ mod tests { let closing_block = System::block_number() + MotionDuration::get(); System::set_block_number(closing_block); // we can successfully close the motion. - assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); + assert_ok!(Collective::close( + Origin::signed(2), + hash.clone(), + 0, + proposal_weight, + proposal_len + )); // Events show that the close ended in a disapproval. 
- assert_eq!(System::events()[1], record(Event::Collective(RawEvent::Closed(hash.clone(), 0, 3)))); - assert_eq!(System::events()[2], record(Event::Collective(RawEvent::Disapproved(hash.clone())))); + assert_eq!( + System::events()[1], + record(Event::Collective(RawEvent::Closed(hash.clone(), 0, 3))) + ); + assert_eq!( + System::events()[2], + record(Event::Collective(RawEvent::Disapproved(hash.clone()))) + ); }) - } #[test] @@ -1801,7 +2098,12 @@ mod tests { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); // First we make the proposal succeed assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); @@ -1828,19 +2130,28 @@ mod tests { let proposal = make_proposal(42); let proposal_len: u32 = proposal.using_encoded(|p| p.len() as u32); let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()), + proposal_len + )); // Proposal would normally succeed assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, true)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); // But Root can disapprove and remove it anyway assert_ok!(Collective::disapprove_proposal(Origin::root(), hash.clone())); - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 2))), - record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), - record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::Collective(RawEvent::Disapproved(hash.clone()))), - ]); + let record = + |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; + assert_eq!( + System::events(), + vec![ + record(Event::Collective(RawEvent::Proposed(1, 0, hash.clone(), 2))), + record(Event::Collective(RawEvent::Voted(1, hash.clone(), true, 1, 0))), + record(Event::Collective(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::Collective(RawEvent::Disapproved(hash.clone()))), + ] + ); }) } @@ -1850,6 +2161,8 @@ mod tests { collective::GenesisConfig:: { members: vec![1, 2, 3, 1], phantom: Default::default(), - }.build_storage().unwrap(); + } + .build_storage() + .unwrap(); } } diff --git a/substrate/frame/collective/src/weights.rs b/substrate/frame/collective/src/weights.rs index 2bbec4d7cc3d8fef10366c98bdbb88b4ff46068c..aab389a45e5b9fca7dafef1c732ee364f550ae1c 100644 --- a/substrate/frame/collective/src/weights.rs +++ b/substrate/frame/collective/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/contracts/common/src/lib.rs b/substrate/frame/contracts/common/src/lib.rs index 098ffd64b8e8ec61bc58b91e803b36300c1c49df..9260b3e05cf3480ec809fc3b942be95861ad40e7 100644 --- a/substrate/frame/contracts/common/src/lib.rs +++ b/substrate/frame/contracts/common/src/lib.rs @@ -26,7 +26,7 @@ use sp_runtime::{DispatchError, 
RuntimeDebug}; use sp_std::prelude::*; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// Result type of a `bare_call` or `bare_instantiate` call. /// @@ -163,7 +163,7 @@ pub enum Code { #[cfg(feature = "std")] mod as_string { use super::*; - use serde::{Serializer, Deserializer, ser::Error}; + use serde::{ser::Error, Deserializer, Serializer}; pub fn serialize(bytes: &Vec, serializer: S) -> Result { std::str::from_utf8(bytes) diff --git a/substrate/frame/contracts/proc-macro/src/lib.rs b/substrate/frame/contracts/proc-macro/src/lib.rs index 3b8b1ea5e6636654c828ae4e51eea8e5e7579788..302a0d01a93d99fb5753e93fa57b653a180a52ac 100644 --- a/substrate/frame/contracts/proc-macro/src/lib.rs +++ b/substrate/frame/contracts/proc-macro/src/lib.rs @@ -21,11 +21,10 @@ extern crate alloc; +use alloc::string::ToString; use proc_macro2::TokenStream; use quote::{quote, quote_spanned}; -use syn::spanned::Spanned; -use syn::{parse_macro_input, Data, DataStruct, DeriveInput, Fields, Ident}; -use alloc::string::ToString; +use syn::{parse_macro_input, spanned::Spanned, Data, DataStruct, DeriveInput, Fields, Ident}; /// This derives `Debug` for a struct where each field must be of some numeric type. /// It interprets each field as if it represents some weight and formats it as times so that @@ -44,7 +43,7 @@ pub fn derive_schedule_debug(input: proc_macro::TokenStream) -> proc_macro::Toke fn derive_debug( input: proc_macro::TokenStream, - fmt: impl Fn(&Ident) -> TokenStream + fmt: impl Fn(&Ident) -> TokenStream, ) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); let name = &input.ident; @@ -55,7 +54,8 @@ fn derive_debug( return quote_spanned! { name.span() => compile_error!("WeightDebug is only supported for structs."); - }.into(); + } + .into() }; #[cfg(feature = "full")] @@ -87,24 +87,22 @@ fn derive_debug( fn iterate_fields(data: &DataStruct, fmt: impl Fn(&Ident) -> TokenStream) -> TokenStream { match &data.fields { Fields::Named(fields) => { - let recurse = fields.named - .iter() - .filter_map(|f| { + let recurse = fields.named.iter().filter_map(|f| { let name = f.ident.as_ref()?; if name.to_string().starts_with('_') { - return None; + return None } let value = fmt(name); - let ret = quote_spanned!{ f.span() => + let ret = quote_spanned! { f.span() => formatter.field(stringify!(#name), #value); }; Some(ret) }); - quote!{ + quote! { #( #recurse )* } - } - Fields::Unnamed(fields) => quote_spanned!{ + }, + Fields::Unnamed(fields) => quote_spanned! { fields.span() => compile_error!("Unnamed fields are not supported") }, diff --git a/substrate/frame/contracts/rpc/runtime-api/src/lib.rs b/substrate/frame/contracts/rpc/runtime-api/src/lib.rs index bb65e1b837399b725a705a74934def67199608ec..742c2997287d245562c42b76280a6edbe90987f2 100644 --- a/substrate/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/substrate/frame/contracts/rpc/runtime-api/src/lib.rs @@ -24,10 +24,10 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Codec; -use sp_std::vec::Vec; use pallet_contracts_primitives::{ - ContractExecResult, GetStorageResult, RentProjectionResult, Code, ContractInstantiateResult, + Code, ContractExecResult, ContractInstantiateResult, GetStorageResult, RentProjectionResult, }; +use sp_std::vec::Vec; sp_api::decl_runtime_apis! { /// The API to interact with contracts without using executive. 
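Side note on the `as_string` module whose imports are reordered above: it follows serde's `with`-module convention so that a contract's debug message (a byte vector) round-trips as a UTF-8 string in JSON. Below is a minimal, self-contained sketch of that pattern, assuming `serde` with the `derive` feature; the `DebugView` struct name and the error text are illustrative only, not part of this diff:

use serde::{Deserialize, Serialize};

mod as_string {
    use serde::{ser::Error, Deserialize, Deserializer, Serialize, Serializer};

    pub fn serialize<S: Serializer>(bytes: &Vec<u8>, serializer: S) -> Result<S::Ok, S::Error> {
        // Refuse to serialize non-UTF-8 bytes instead of emitting lossy output.
        std::str::from_utf8(bytes)
            .map_err(|e| S::Error::custom(format!("debug message is not UTF-8: {}", e)))?
            .serialize(serializer)
    }

    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Vec<u8>, D::Error> {
        Ok(String::deserialize(deserializer)?.into_bytes())
    }
}

#[derive(Serialize, Deserialize)]
struct DebugView {
    // `with` points serde at the module's serialize/deserialize functions.
    #[serde(with = "as_string")]
    debug_message: Vec<u8>,
}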
diff --git a/substrate/frame/contracts/rpc/src/lib.rs b/substrate/frame/contracts/rpc/src/lib.rs index 3b95e9850165673003beb762d0c28b4caeb454ac..2586ec7903dd691249f8d7bba17ac4ebdd6b21a5 100644 --- a/substrate/frame/contracts/rpc/src/lib.rs +++ b/substrate/frame/contracts/rpc/src/lib.rs @@ -22,7 +22,9 @@ use std::sync::Arc; use codec::Codec; use jsonrpc_core::{Error, ErrorCode, Result}; use jsonrpc_derive::rpc; -use pallet_contracts_primitives::RentProjection; +use pallet_contracts_primitives::{ + Code, ContractExecResult, ContractInstantiateResult, RentProjection, +}; use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -33,7 +35,6 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, }; use std::convert::{TryFrom, TryInto}; -use pallet_contracts_primitives::{Code, ContractExecResult, ContractInstantiateResult}; pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; @@ -164,10 +165,7 @@ pub struct Contracts { impl Contracts { /// Create new `Contracts` with the given reference to the client. pub fn new(client: Arc) -> Self { - Contracts { - client, - _marker: Default::default(), - } + Contracts { client, _marker: Default::default() } } } impl @@ -202,13 +200,7 @@ where // If the block hash is not supplied assume the best block. self.client.info().best_hash)); - let CallRequest { - origin, - dest, - value, - gas_limit, - input_data, - } = call_request; + let CallRequest { origin, dest, value, gas_limit, input_data } = call_request; let value: Balance = decode_hex(value, "balance")?; let gas_limit: Weight = decode_hex(gas_limit, "weight")?; @@ -225,20 +217,15 @@ where &self, instantiate_request: InstantiateRequest, at: Option<::Hash>, - ) -> Result::Header as HeaderT>::Number>> { + ) -> Result::Header as HeaderT>::Number>> + { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
self.client.info().best_hash)); - let InstantiateRequest { - origin, - endowment, - gas_limit, - code, - data, - salt, - } = instantiate_request; + let InstantiateRequest { origin, endowment, gas_limit, code, data, salt } = + instantiate_request; let endowment: Balance = decode_hex(endowment, "balance")?; let gas_limit: Weight = decode_hex(gas_limit, "weight")?; @@ -337,7 +324,8 @@ mod tests { #[test] fn call_request_should_serialize_deserialize_properly() { type Req = CallRequest; - let req: Req = serde_json::from_str(r#" + let req: Req = serde_json::from_str( + r#" { "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", "dest": "5DRakbLVnjVrW6niwLfHGW24EeCEvDAFGEXrtaYS5M4ynoom", @@ -345,7 +333,9 @@ mod tests { "gasLimit": 1000000000000, "inputData": "0x8c97db39" } - "#).unwrap(); + "#, + ) + .unwrap(); assert_eq!(req.gas_limit.into_u256(), U256::from(0xe8d4a51000u64)); assert_eq!(req.value.into_u256(), U256::from(1234567890987654321u128)); } @@ -353,7 +343,8 @@ mod tests { #[test] fn instantiate_request_should_serialize_deserialize_properly() { type Req = InstantiateRequest; - let req: Req = serde_json::from_str(r#" + let req: Req = serde_json::from_str( + r#" { "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", "endowment": "0x88", @@ -362,7 +353,9 @@ mod tests { "data": "0x4299", "salt": "0x9988" } - "#).unwrap(); + "#, + ) + .unwrap(); assert_eq!(req.origin, "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); assert_eq!(req.endowment.into_u256(), 0x88.into()); @@ -383,7 +376,8 @@ mod tests { let actual = serde_json::to_string(&res).unwrap(); assert_eq!(actual, trim(expected).as_str()); } - test(r#"{ + test( + r#"{ "gasConsumed": 5000, "gasRequired": 8000, "debugMessage": "HelloWorld", @@ -393,25 +387,30 @@ mod tests { "data": "0x1234" } } - }"#); - test(r#"{ + }"#, + ); + test( + r#"{ "gasConsumed": 3400, "gasRequired": 5200, "debugMessage": "HelloWorld", "result": { "Err": "BadOrigin" } - }"#); + }"#, + ); } #[test] fn instantiate_result_should_serialize_deserialize_properly() { fn test(expected: &str) { - let res: ContractInstantiateResult = serde_json::from_str(expected).unwrap(); + let res: ContractInstantiateResult = + serde_json::from_str(expected).unwrap(); let actual = serde_json::to_string(&res).unwrap(); assert_eq!(actual, trim(expected).as_str()); } - test(r#"{ + test( + r#"{ "gasConsumed": 5000, "gasRequired": 8000, "debugMessage": "HelloWorld", @@ -425,14 +424,17 @@ mod tests { "rentProjection": null } } - }"#); - test(r#"{ + }"#, + ); + test( + r#"{ "gasConsumed": 3400, "gasRequired": 5200, "debugMessage": "HelloWorld", "result": { "Err": "BadOrigin" } - }"#); + }"#, + ); } } diff --git a/substrate/frame/contracts/src/benchmarking/code.rs b/substrate/frame/contracts/src/benchmarking/code.rs index cd13e3be6df3a35b49c01bc0801a41da3e5434c6..64bdde9b6ea5ad7949b8e5e77ea1d957f9c75b6d 100644 --- a/substrate/frame/contracts/src/benchmarking/code.rs +++ b/substrate/frame/contracts/src/benchmarking/code.rs @@ -25,21 +25,20 @@ //! compiles it down into a `WasmModule` that can be used as a contract's code. 
use crate::Config; +use frame_support::traits::Get; use pwasm_utils::{ - stack_height::inject_limiter, parity_wasm::{ + builder, elements::{ - self, Instruction, Instructions, FuncBody, ValueType, BlockType, Section, - CustomSection, + self, BlockType, CustomSection, FuncBody, Instruction, Instructions, Section, ValueType, }, - builder, }, + stack_height::inject_limiter, }; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; -use sp_std::{prelude::*, convert::TryFrom, borrow::ToOwned}; -use frame_support::traits::Get; +use sp_std::{borrow::ToOwned, convert::TryFrom, prelude::*}; /// Pass to `create_code` in order to create a compiled `WasmModule`. /// @@ -117,7 +116,7 @@ pub struct ImportedFunction { /// A wasm module ready to be put on chain. #[derive(Clone)] -pub struct WasmModule { +pub struct WasmModule { pub code: Vec, pub hash: ::Output, memory: Option, @@ -136,27 +135,37 @@ where let mut contract = builder::module() // deploy function (first internal function) .function() - .signature().build() - .with_body(def.deploy_body.unwrap_or_else(|| - FuncBody::new(Vec::new(), Instructions::empty()) - )) - .build() + .signature() + .build() + .with_body( + def.deploy_body + .unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty())), + ) + .build() // call function (second internal function) .function() - .signature().build() - .with_body(def.call_body.unwrap_or_else(|| - FuncBody::new(Vec::new(), Instructions::empty()) - )) - .build() - .export().field("deploy").internal().func(func_offset).build() - .export().field("call").internal().func(func_offset + 1).build(); + .signature() + .build() + .with_body( + def.call_body + .unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty())), + ) + .build() + .export() + .field("deploy") + .internal() + .func(func_offset) + .build() + .export() + .field("call") + .internal() + .func(func_offset + 1) + .build(); // If specified we add an additional internal function if let Some(body) = def.aux_body { - let mut signature = contract - .function() - .signature(); - for _ in 0 .. def.aux_arg_num { + let mut signature = contract.function().signature(); + for _ in 0..def.aux_arg_num { signature = signature.with_param(ValueType::I64); } contract = signature.build().with_body(body).build(); @@ -164,9 +173,12 @@ where // Grant access to linear memory. 
if let Some(memory) = &def.memory { - contract = contract.import() - .module("env").field("memory") - .external().memory(memory.min_pages, Some(memory.max_pages)) + contract = contract + .import() + .module("env") + .field("memory") + .external() + .memory(memory.min_pages, Some(memory.max_pages)) .build(); } @@ -177,7 +189,8 @@ where .with_results(func.return_type.into_iter().collect()) .build_sig(); let sig = contract.push_signature(sig); - contract = contract.import() + contract = contract + .import() .module(func.module) .field(func.name) .with_external(elements::External::Function(sig)) @@ -186,7 +199,8 @@ where // Initialize memory for data in def.data_segments { - contract = contract.data() + contract = contract + .data() .offset(Instruction::I32Const(data.offset as i32)) .value(data.value) .build() @@ -194,12 +208,13 @@ where // Add global variables if def.num_globals > 0 { - use rand::{prelude::*, distributions::Standard}; + use rand::{distributions::Standard, prelude::*}; let rng = rand_pcg::Pcg32::seed_from_u64(3112244599778833558); for val in rng.sample_iter(Standard).take(def.num_globals as usize) { contract = contract .global() - .value_type().i64() + .value_type() + .i64() .mutable() .init_expr(Instruction::I64Const(val)) .build() @@ -218,31 +233,22 @@ where // Add the dummy section if def.dummy_section > 0 { - contract = contract.with_section( - Section::Custom( - CustomSection::new("dummy".to_owned(), vec![42; def.dummy_section as usize]) - ) - ); + contract = contract.with_section(Section::Custom(CustomSection::new( + "dummy".to_owned(), + vec![42; def.dummy_section as usize], + ))); } let mut code = contract.build(); // Inject stack height metering if def.inject_stack_metering { - code = inject_limiter( - code, - T::Schedule::get().limits.stack_height - ) - .unwrap(); + code = inject_limiter(code, T::Schedule::get().limits.stack_height).unwrap(); } let code = code.to_bytes().unwrap(); let hash = T::Hashing::hash(&code); - Self { - code, - hash, - memory: def.memory, - } + Self { code, hash, memory: def.memory } } } @@ -266,7 +272,7 @@ where ModuleDefinition { memory: Some(ImportedMemory::max::()), dummy_section: dummy_bytes.saturating_sub(module_overhead), - .. Default::default() + ..Default::default() } .into() } @@ -275,23 +281,18 @@ where /// `instantiate_with_code` for different sizes of wasm modules. The generated module maximizes /// instrumentation runtime by nesting blocks as deeply as possible given the byte budget. pub fn sized(target_bytes: u32) -> Self { - use self::elements::Instruction::{If, I32Const, Return, End}; + use self::elements::Instruction::{End, I32Const, If, Return}; // Base size of a contract is 63 bytes and each expansion adds 6 bytes. // We do one expansion less to account for the code section and function body // size fields inside the binary wasm module representation which are leb128 encoded // and therefore grow in size when the contract grows. We are not allowed to overshoot // because of the maximum code size that is enforced by `instantiate_with_code`. let expansions = (target_bytes.saturating_sub(63) / 6).saturating_sub(1); - const EXPANSION: [Instruction; 4] = [ - I32Const(0), - If(BlockType::NoResult), - Return, - End, - ]; + const EXPANSION: [Instruction; 4] = [I32Const(0), If(BlockType::NoResult), Return, End]; ModuleDefinition { call_body: Some(body::repeated(expansions, &EXPANSION)), memory: Some(ImportedMemory::max::()), - .. 
Default::default() + ..Default::default() } .into() } @@ -317,12 +318,15 @@ where offset: 0, value: (pages * 64 * 1024 - 4).to_le_bytes().to_vec(), }], - call_body: Some(body::repeated(repeat, &[ - Instruction::I32Const(4), // ptr where to store output - Instruction::I32Const(0), // ptr to length - Instruction::Call(0), // call the imported function - ])), - .. Default::default() + call_body: Some(body::repeated( + repeat, + &[ + Instruction::I32Const(4), // ptr where to store output + Instruction::I32Const(0), // ptr to length + Instruction::Call(0), // call the imported function + ], + )), + ..Default::default() } .into() } @@ -339,13 +343,16 @@ where params: vec![ValueType::I32, ValueType::I32, ValueType::I32], return_type: None, }], - call_body: Some(body::repeated(repeat, &[ - Instruction::I32Const(0), // input_ptr - Instruction::I32Const(data_size as i32), // input_len - Instruction::I32Const(0), // output_ptr - Instruction::Call(0), - ])), - .. Default::default() + call_body: Some(body::repeated( + repeat, + &[ + Instruction::I32Const(0), // input_ptr + Instruction::I32Const(data_size as i32), // input_len + Instruction::I32Const(0), // output_ptr + Instruction::Call(0), + ], + )), + ..Default::default() } .into() } @@ -354,11 +361,7 @@ where /// and adds it to `env`. A reference to that memory is returned so that it can be used to /// access the memory contents from the supervisor. pub fn add_memory(&self, env: &mut EnvironmentDefinitionBuilder) -> Option { - let memory = if let Some(memory) = &self.memory { - memory - } else { - return None; - }; + let memory = if let Some(memory) = &self.memory { memory } else { return None }; let memory = Memory::new(memory.min_pages, Some(memory.max_pages)).unwrap(); env.add_memory("env", "memory", memory.clone()); Some(memory) @@ -367,25 +370,25 @@ where pub fn unary_instr(instr: Instruction, repeat: u32) -> Self { use body::DynInstr::{RandomI64Repeated, Regular}; ModuleDefinition { - call_body: Some(body::repeated_dyn(repeat, vec![ - RandomI64Repeated(1), - Regular(instr), - Regular(Instruction::Drop), - ])), - .. Default::default() - }.into() + call_body: Some(body::repeated_dyn( + repeat, + vec![RandomI64Repeated(1), Regular(instr), Regular(Instruction::Drop)], + )), + ..Default::default() + } + .into() } pub fn binary_instr(instr: Instruction, repeat: u32) -> Self { use body::DynInstr::{RandomI64Repeated, Regular}; ModuleDefinition { - call_body: Some(body::repeated_dyn(repeat, vec![ - RandomI64Repeated(2), - Regular(instr), - Regular(Instruction::Drop), - ])), - .. Default::default() - }.into() + call_body: Some(body::repeated_dyn( + repeat, + vec![RandomI64Repeated(2), Regular(instr), Regular(Instruction::Drop)], + )), + ..Default::default() + } + .into() } } @@ -426,7 +429,7 @@ pub mod body { RandomGetGlobal(u32, u32), /// Insert a SetGlobal with a random offset in [low, high). /// (low, high) - RandomSetGlobal(u32, u32) + RandomSetGlobal(u32, u32), } pub fn plain(instructions: Vec) -> FuncBody { @@ -441,13 +444,13 @@ pub mod body { .take(instructions.len() * usize::try_from(repetitions).unwrap()) .cloned() .chain(sp_std::iter::once(Instruction::End)) - .collect() + .collect(), ); FuncBody::new(Vec::new(), instructions) } pub fn repeated_dyn(repetitions: u32, mut instructions: Vec) -> FuncBody { - use rand::{prelude::*, distributions::Standard}; + use rand::{distributions::Standard, prelude::*}; // We do not need to be secure here. 
let mut rng = rand_pcg::Pcg32::seed_from_u64(8446744073709551615); @@ -456,50 +459,46 @@ pub mod body { let body = (0..instructions.len()) .cycle() .take(instructions.len() * usize::try_from(repetitions).unwrap()) - .flat_map(|idx| - match &mut instructions[idx] { - DynInstr::Regular(instruction) => vec![instruction.clone()], - DynInstr::Counter(offset, increment_by) => { - let current = *offset; - *offset += *increment_by; - vec![Instruction::I32Const(current as i32)] - }, - DynInstr::RandomUnaligned(low, high) => { - let unaligned = rng.gen_range(*low..*high) | 1; - vec![Instruction::I32Const(unaligned as i32)] - }, - DynInstr::RandomI32(low, high) => { - vec![Instruction::I32Const(rng.gen_range(*low..*high))] - }, - DynInstr::RandomI32Repeated(num) => { - (&mut rng).sample_iter(Standard).take(*num).map(|val| - Instruction::I32Const(val) - ) - .collect() - }, - DynInstr::RandomI64Repeated(num) => { - (&mut rng).sample_iter(Standard).take(*num).map(|val| - Instruction::I64Const(val) - ) - .collect() - }, - DynInstr::RandomGetLocal(low, high) => { - vec![Instruction::GetLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomSetLocal(low, high) => { - vec![Instruction::SetLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomTeeLocal(low, high) => { - vec![Instruction::TeeLocal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomGetGlobal(low, high) => { - vec![Instruction::GetGlobal(rng.gen_range(*low..*high))] - }, - DynInstr::RandomSetGlobal(low, high) => { - vec![Instruction::SetGlobal(rng.gen_range(*low..*high))] - }, - } - ) + .flat_map(|idx| match &mut instructions[idx] { + DynInstr::Regular(instruction) => vec![instruction.clone()], + DynInstr::Counter(offset, increment_by) => { + let current = *offset; + *offset += *increment_by; + vec![Instruction::I32Const(current as i32)] + }, + DynInstr::RandomUnaligned(low, high) => { + let unaligned = rng.gen_range(*low..*high) | 1; + vec![Instruction::I32Const(unaligned as i32)] + }, + DynInstr::RandomI32(low, high) => { + vec![Instruction::I32Const(rng.gen_range(*low..*high))] + }, + DynInstr::RandomI32Repeated(num) => (&mut rng) + .sample_iter(Standard) + .take(*num) + .map(|val| Instruction::I32Const(val)) + .collect(), + DynInstr::RandomI64Repeated(num) => (&mut rng) + .sample_iter(Standard) + .take(*num) + .map(|val| Instruction::I64Const(val)) + .collect(), + DynInstr::RandomGetLocal(low, high) => { + vec![Instruction::GetLocal(rng.gen_range(*low..*high))] + }, + DynInstr::RandomSetLocal(low, high) => { + vec![Instruction::SetLocal(rng.gen_range(*low..*high))] + }, + DynInstr::RandomTeeLocal(low, high) => { + vec![Instruction::TeeLocal(rng.gen_range(*low..*high))] + }, + DynInstr::RandomGetGlobal(low, high) => { + vec![Instruction::GetGlobal(rng.gen_range(*low..*high))] + }, + DynInstr::RandomSetGlobal(low, high) => { + vec![Instruction::SetGlobal(rng.gen_range(*low..*high))] + }, + }) .chain(sp_std::iter::once(Instruction::End)) .collect(); FuncBody::new(Vec::new(), Instructions::new(body)) diff --git a/substrate/frame/contracts/src/benchmarking/mod.rs b/substrate/frame/contracts/src/benchmarking/mod.rs index cbe5e48a4f02050bddb50cf46ccd9c6a275d289c..83c18f8f79e0a1d30b1b28cd92f038f9bd5a9100 100644 --- a/substrate/frame/contracts/src/benchmarking/mod.rs +++ b/substrate/frame/contracts/src/benchmarking/mod.rs @@ -22,28 +22,28 @@ mod code; mod sandbox; -use crate::{ - *, Pallet as Contracts, - exec::StorageKey, - rent::Rent, - schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, - storage::Storage, -}; use self::{ 
code::{ body::{self, DynInstr::*}, - ModuleDefinition, DataSegment, ImportedMemory, ImportedFunction, WasmModule, + DataSegment, ImportedFunction, ImportedMemory, ModuleDefinition, WasmModule, }, sandbox::Sandbox, }; +use crate::{ + exec::StorageKey, + rent::Rent, + schedule::{API_BENCHMARK_BATCH_SIZE, INSTR_BENCHMARK_BATCH_SIZE}, + storage::Storage, + Pallet as Contracts, *, +}; use codec::Encode; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::weights::Weight; use frame_system::{Pallet as System, RawOrigin}; -use pwasm_utils::parity_wasm::elements::{Instruction, ValueType, BlockType, BrTableData}; -use sp_runtime::traits::{Hash, Bounded, Zero}; -use sp_std::{default::Default, convert::{TryInto}, vec::Vec, vec}; use pallet_contracts_primitives::RentProjection; -use frame_support::weights::Weight; +use pwasm_utils::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; +use sp_runtime::traits::{Bounded, Hash, Zero}; +use sp_std::{convert::TryInto, default::Default, vec, vec::Vec}; /// How many batches we do per API benchmark. const API_BENCHMARK_BATCHES: u32 = 20; @@ -74,7 +74,7 @@ impl Endow { /// The maximum amount of balance a caller can transfer without being brought below /// the existential deposit. This assumes that every caller is funded with the amount /// returned by `caller_funding`. - fn max() -> BalanceOf { + fn max() -> BalanceOf { caller_funding::().saturating_sub(T::Currency::minimum_balance()) } } @@ -109,8 +109,7 @@ where module: WasmModule, data: Vec, endowment: Endow, - ) -> Result, &'static str> - { + ) -> Result, &'static str> { let (storage_size, endowment) = match endowment { Endow::CollectRent => { // storage_size cannot be zero because otherwise a contract that is just above @@ -182,7 +181,8 @@ where /// Get the `AliveContractInfo` of the `addr` or an error if it is no longer alive. fn address_alive_info(addr: &T::AccountId) -> Result, &'static str> { - ContractInfoOf::::get(addr).and_then(|c| c.get_alive()) + ContractInfoOf::::get(addr) + .and_then(|c| c.get_alive()) .ok_or("Expected contract to be alive at this point.") } @@ -193,7 +193,8 @@ where /// Return an error if this contract is no tombstone. fn ensure_tombstone(&self) -> Result<(), &'static str> { - ContractInfoOf::::get(&self.account_id).and_then(|c| c.get_tombstone()) + ContractInfoOf::::get(&self.account_id) + .and_then(|c| c.get_tombstone()) .ok_or("Expected contract to be a tombstone at this point.") .map(|_| ()) } @@ -236,16 +237,13 @@ where let contract = Contract::::new(code, vec![], Endow::CollectRent)?; let storage_items = create_storage::(stor_num, stor_size)?; contract.store(&storage_items)?; - Ok(Self { - contract, - storage: storage_items, - }) + Ok(Self { contract, storage: storage_items }) } /// Increase the system block number so that this contract is eligible for eviction. - fn set_block_num_for_eviction(&self) -> Result<(), &'static str> { + fn set_block_num_for_eviction(&self) -> Result<(), &'static str> { System::::set_block_number( - self.contract.eviction_at()? + T::SignedClaimHandicap::get() + 5u32.into() + self.contract.eviction_at()? + T::SignedClaimHandicap::get() + 5u32.into(), ); Ok(()) } @@ -261,15 +259,17 @@ where /// Generate `stor_num` storage items. Each has the size `stor_size`. 
fn create_storage( stor_num: u32, - stor_size: u32 + stor_size: u32, ) -> Result)>, &'static str> { - (0..stor_num).map(|i| { - let hash = T::Hashing::hash_of(&i) - .as_ref() - .try_into() - .map_err(|_| "Hash too big for storage key")?; - Ok((hash, vec![42u8; stor_size as usize])) - }).collect::, &'static str>>() + (0..stor_num) + .map(|i| { + let hash = T::Hashing::hash_of(&i) + .as_ref() + .try_into() + .map_err(|_| "Hash too big for storage key")?; + Ok((hash, vec![42u8; stor_size as usize])) + }) + .collect::, &'static str>>() } /// The funding that each account that either calls or instantiates contracts is funded with. diff --git a/substrate/frame/contracts/src/benchmarking/sandbox.rs b/substrate/frame/contracts/src/benchmarking/sandbox.rs index a97fcc2b113ecfb290a264b7c2416641d701938a..320ac90cce64eb2c9df6732b4ea554c1bcae152e 100644 --- a/substrate/frame/contracts/src/benchmarking/sandbox.rs +++ b/substrate/frame/contracts/src/benchmarking/sandbox.rs @@ -15,14 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -///! For instruction benchmarking we do no instantiate a full contract but merely the -///! sandbox to execute the wasm code. This is because we do not need the full -///! environment that provides the seal interface as imported functions. - -use super::{ - Config, - code::WasmModule, -}; +/// ! For instruction benchmarking we do not instantiate a full contract but merely the +/// ! sandbox to execute the wasm code. This is because we do not need the full +/// ! environment that provides the seal interface as imported functions. +use super::{code::WasmModule, Config}; use sp_core::crypto::UncheckedFrom; use sp_sandbox::{EnvironmentDefinitionBuilder, Instance, Memory}; @@ -51,9 +47,6 @@ where let memory = module.add_memory(&mut env_builder); let instance = Instance::new(&module.code, &env_builder, &mut ()) .expect("Failed to create benchmarking Sandbox instance"); - Self { - instance, - _memory: memory, - } + Self { instance, _memory: memory } } } diff --git a/substrate/frame/contracts/src/chain_extension.rs b/substrate/frame/contracts/src/chain_extension.rs index bb352c3a93d6f4e347a6d4d8ac6e7038912b218a..13696240fe4ea126534e26d1ce125556d53a1e96 100644 --- a/substrate/frame/contracts/src/chain_extension.rs +++ b/substrate/frame/contracts/src/chain_extension.rs @@ -55,22 +55,19 @@ //! on how to use a chain extension in order to provide new features to ink! contracts. use crate::{ - Error, - wasm::{Runtime, RuntimeCosts}, gas::ChargedAmount, + wasm::{Runtime, RuntimeCosts}, + Error, }; use codec::{Decode, MaxEncodedLen}; use frame_support::weights::Weight; use sp_runtime::DispatchError; -use sp_std::{ - marker::PhantomData, - vec::Vec, -}; +use sp_std::{marker::PhantomData, vec::Vec}; +pub use crate::{exec::Ext, Config}; pub use frame_system::Config as SysConfig; pub use pallet_contracts_primitives::ReturnFlags; pub use sp_core::crypto::UncheckedFrom; -pub use crate::{Config, exec::Ext}; pub use state::Init as InitState; /// Result that returns a [`DispatchError`] on error. @@ -90,7 +87,7 @@ pub trait ChainExtension { /// /// # Parameters /// - `func_id`: The first argument to `seal_call_chain_extension`. Usually used to - /// determine which function to realize. + /// determine which function to realize. /// - `env`: Access to the remaining arguments and the execution environment. 
/// /// # Return @@ -143,7 +140,7 @@ pub enum RetVal { /// The semantic is the same as for calling `seal_return`: The control returns to /// the caller of the currently executing contract yielding the supplied buffer and /// flags. - Diverging{flags: ReturnFlags, data: Vec}, + Diverging { flags: ReturnFlags, data: Vec }, } /// Grants the chain extension access to its parameters and execution environment. @@ -183,7 +180,9 @@ where /// This is when a maximum a priori amount was charged and then should be partially /// refunded to match the actual amount. pub fn adjust_weight(&mut self, charged: ChargedAmount, actual_weight: Weight) { - self.inner.runtime.adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) + self.inner + .runtime + .adjust_gas(charged, RuntimeCosts::ChainExtension(actual_weight)) } /// Grants access to the execution environment of the current contract call. @@ -204,46 +203,31 @@ impl<'a, 'b, E: Ext> Environment<'a, 'b, E, state::Init> { /// It is only available to this crate because only the wasm runtime module needs to /// ever create this type. Chain extensions merely consume it. pub(crate) fn new( - runtime: &'a mut Runtime::<'b, E>, + runtime: &'a mut Runtime<'b, E>, input_ptr: u32, input_len: u32, output_ptr: u32, output_len_ptr: u32, ) -> Self { Environment { - inner: Inner { - runtime, - input_ptr, - input_len, - output_ptr, - output_len_ptr, - }, + inner: Inner { runtime, input_ptr, input_len, output_ptr, output_len_ptr }, phantom: PhantomData, } } /// Use all arguments as integer values. pub fn only_in(self) -> Environment<'a, 'b, E, state::OnlyIn> { - Environment { - inner: self.inner, - phantom: PhantomData, - } + Environment { inner: self.inner, phantom: PhantomData } } /// Use input arguments as integer and output arguments as pointer to a buffer. pub fn prim_in_buf_out(self) -> Environment<'a, 'b, E, state::PrimInBufOut> { - Environment { - inner: self.inner, - phantom: PhantomData, - } + Environment { inner: self.inner, phantom: PhantomData } } /// Use input and output arguments as pointers to a buffer. pub fn buf_in_buf_out(self) -> Environment<'a, 'b, E, state::BufInBufOut> { - Environment { - inner: self.inner, - phantom: PhantomData, - } + Environment { inner: self.inner, phantom: PhantomData } } } @@ -287,10 +271,9 @@ where /// charge the overall costs either using `max_len` (worst case approximation) or using /// [`in_len()`](Self::in_len). pub fn read(&self, max_len: u32) -> Result> { - self.inner.runtime.read_sandbox_memory( - self.inner.input_ptr, - self.inner.input_len.min(max_len), - ) + self.inner + .runtime + .read_sandbox_memory(self.inner.input_ptr, self.inner.input_len.min(max_len)) } /// Reads `min(buffer.len(), in_len) from contract memory. @@ -304,10 +287,7 @@ where let buffer = core::mem::take(buffer); &mut buffer[..len.min(self.inner.input_len as usize)] }; - self.inner.runtime.read_sandbox_memory_into_buf( - self.inner.input_ptr, - sliced, - )?; + self.inner.runtime.read_sandbox_memory_into_buf(self.inner.input_ptr, sliced)?; *buffer = sliced; Ok(()) } @@ -377,7 +357,7 @@ where /// gets too large. struct Inner<'a, 'b, E: Ext> { /// The runtime contains all necessary functions to interact with the running contract. - runtime: &'a mut Runtime::<'b, E>, + runtime: &'a mut Runtime<'b, E>, /// Verbatim argument passed to `seal_call_chain_extension`. input_ptr: u32, /// Verbatim argument passed to `seal_call_chain_extension`. 
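Because most of the chain-extension surface is touched above, a hedged sketch of how the pieces fit together may help review. The extension name, the `func_id` value, and the 256-byte read cap below are made up for illustration, and the exact trait bounds should be double-checked against the crate at this commit:

use pallet_contracts::chain_extension::{
    ChainExtension, Environment, Ext, InitState, RetVal, SysConfig, UncheckedFrom,
};
use sp_runtime::DispatchError;

/// A toy extension that echoes the input buffer back to the calling contract.
pub struct EchoExtension;

impl ChainExtension for EchoExtension {
    fn call<E: Ext>(
        func_id: u32,
        env: Environment<E, InitState>,
    ) -> Result<RetVal, DispatchError>
    where
        <E::T as SysConfig>::AccountId: UncheckedFrom<<E::T as SysConfig>::Hash> + AsRef<[u8]>,
    {
        match func_id {
            // Hypothetical function id chosen for this sketch.
            1 => {
                // Treat input and output arguments as pointers into contract memory.
                let mut env = env.buf_in_buf_out();
                // Read at most 256 bytes of the contract-supplied input.
                let input = env.read(256)?;
                // Copy the bytes into the contract-supplied output buffer.
                env.write(&input, false, None)?;
                // Return control to the contract with a status value.
                Ok(RetVal::Converging(0))
            },
            _ => Err(DispatchError::Other("unknown chain extension func_id")),
        }
    }
}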
diff --git a/substrate/frame/contracts/src/exec.rs b/substrate/frame/contracts/src/exec.rs index ae1585afbb890bff910f35f6acf20008d3361cc6..2967e4fa418acb9a38b15370d10c9c91c5d2319d 100644 --- a/substrate/frame/contracts/src/exec.rs +++ b/substrate/frame/contracts/src/exec.rs @@ -16,27 +16,29 @@ // limitations under the License. use crate::{ - CodeHash, Event, Config, Pallet as Contracts, - BalanceOf, ContractInfo, gas::GasMeter, rent::{Rent, RentStatus}, storage::Storage, - Error, ContractInfoOf, Schedule, AliveContractInfo, AccountCounter, + gas::GasMeter, + rent::{Rent, RentStatus}, + storage::Storage, + AccountCounter, AliveContractInfo, BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, + Error, Event, Pallet as Contracts, Schedule, }; -use sp_core::crypto::UncheckedFrom; -use sp_std::{ - prelude::*, - marker::PhantomData, - mem, -}; -use sp_runtime::{Perbill, traits::{Convert, Saturating}}; use frame_support::{ - dispatch::{DispatchResult, DispatchError, DispatchResultWithPostInfo, Dispatchable}, + dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable}, + ensure, storage::{with_transaction, TransactionOutcome}, - traits::{ExistenceRequirement, Currency, Time, Randomness, Get, OriginTrait, Filter}, + traits::{Currency, ExistenceRequirement, Filter, Get, OriginTrait, Randomness, Time}, weights::Weight, - ensure, DefaultNoBound, + DefaultNoBound, }; use frame_system::RawOrigin; -use pallet_contracts_primitives::{ExecReturnValue}; -use smallvec::{SmallVec, Array}; +use pallet_contracts_primitives::ExecReturnValue; +use smallvec::{Array, SmallVec}; +use sp_core::crypto::UncheckedFrom; +use sp_runtime::{ + traits::{Convert, Saturating}, + Perbill, +}; +use sp_std::{marker::PhantomData, mem, prelude::*}; pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; @@ -75,10 +77,7 @@ pub struct ExecError { impl> From for ExecError { fn from(error: T) -> Self { - Self { - error: error.into(), - origin: ErrorOrigin::Caller, - } + Self { error: error.into(), origin: ErrorOrigin::Caller } } } @@ -124,7 +123,7 @@ where account_id: &T::AccountId, value: &BalanceOf, contract: &AliveContractInfo, - executable: &E + executable: &E, ) -> Self { Self { total_balance: T::Currency::total_balance(account_id).saturating_add(*value), @@ -187,7 +186,7 @@ pub trait Ext: sealing::Sealed { value: BalanceOf, input_data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue ), ExecError>; + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; /// Transfer all funds to `beneficiary` and delete the contract. /// @@ -218,11 +217,7 @@ pub trait Ext: sealing::Sealed { ) -> Result<(), DispatchError>; /// Transfer some amount of funds into the specified account. - fn transfer( - &mut self, - to: &AccountIdOf, - value: BalanceOf, - ) -> DispatchResult; + fn transfer(&mut self, to: &AccountIdOf, value: BalanceOf) -> DispatchResult; /// Returns the storage entry of the executing account by the given `key`. /// @@ -351,8 +346,7 @@ pub trait Executable: Sized { /// # Note /// /// Charges weight proportional to the code size from the gas meter. - fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError>; + fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) -> Result<(), DispatchError>; /// Decrement the refcount by one and remove the code when it drops to zero. 
/// @@ -361,8 +355,10 @@ pub trait Executable: Sized { /// # Note /// /// Charges weight proportional to the code size from the gas meter - fn remove_user(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError>; + fn remove_user( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError>; /// Execute the specified exported function and return the result. /// @@ -550,15 +546,15 @@ macro_rules! get_cached_or_panic_after_load { qed" ); } - }} + }}; } impl CachedContract { /// Load the `contract_info` from storage if necessary. fn load(&mut self, account_id: &T::AccountId) { if let CachedContract::Invalidated = self { - let contract = >::get(&account_id) - .and_then(|contract| contract.get_alive()); + let contract = + >::get(&account_id).and_then(|contract| contract.get_alive()); if let Some(contract) = contract { *self = CachedContract::Cached(contract); } @@ -610,7 +606,7 @@ where debug_message: Option<&'a mut Vec>, ) -> Result { let (mut stack, executable) = Self::new( - FrameArgs::Call{dest, cached_info: None}, + FrameArgs::Call { dest, cached_info: None }, origin, gas_meter, schedule, @@ -692,18 +688,18 @@ where value_transferred: BalanceOf, gas_meter: &mut GasMeter, gas_limit: Weight, - schedule: &Schedule + schedule: &Schedule, ) -> Result<(Frame, E), ExecError> { let (account_id, contract_info, executable, entry_point) = match frame_args { - FrameArgs::Call{dest, cached_info} => { + FrameArgs::Call { dest, cached_info } => { let contract = if let Some(contract) = cached_info { contract } else { >::get(&dest) .ok_or(>::ContractNotFound.into()) - .and_then(|contract| + .and_then(|contract| { contract.get_alive().ok_or(>::ContractIsTombstone) - )? + })? }; let executable = E::from_storage(contract.code_hash, schedule, gas_meter)?; @@ -713,15 +709,14 @@ where // changes would be rolled back in case this contract is called by another // contract. // See: https://github.com/paritytech/substrate/issues/6439#issuecomment-648754324 - let contract = Rent:: - ::charge(&dest, contract, executable.occupied_storage())? - .ok_or(Error::::RentNotPaid)?; + let contract = + Rent::::charge(&dest, contract, executable.occupied_storage())? + .ok_or(Error::::RentNotPaid)?; (dest, contract, executable, ExportedFunction::Call) - } - FrameArgs::Instantiate{sender, trie_seed, executable, salt} => { - let account_id = >::contract_address( - &sender, executable.code_hash(), &salt, - ); + }, + FrameArgs::Instantiate { sender, trie_seed, executable, salt } => { + let account_id = + >::contract_address(&sender, executable.code_hash(), &salt); let trie_id = Storage::::generate_trie_id(&account_id, trie_seed); let contract = Storage::::new_contract( &account_id, @@ -729,12 +724,15 @@ where executable.code_hash().clone(), )?; (account_id, contract, executable, ExportedFunction::Constructor) - } + }, }; let frame = Frame { rent_params: RentParams::new( - &account_id, &value_transferred, &contract_info, &executable, + &account_id, + &value_transferred, + &contract_info, + &executable, ), value_transferred, contract_info: CachedContract::Cached(contract_info), @@ -755,7 +753,7 @@ where gas_limit: Weight, ) -> Result { if self.frames.len() == T::CallStack::size() { - return Err(Error::::MaxCallDepthReached.into()); + return Err(Error::::MaxCallDepthReached.into()) } // We need to make sure that changes made to the contract info are not discarded. 
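The hunks below reformat `run`, where contract execution is wrapped in a storage transaction: state changes are committed only when execution succeeds, while gas accounting survives either way. A condensed sketch of that commit-or-rollback shape, using the same `with_transaction` API imported at the top of this file (simplified in that the pallet keys the decision off `ExecReturnValue::is_success`, not merely `Ok`):

use frame_support::storage::{with_transaction, TransactionOutcome};

/// Run `f` inside a storage transaction, keeping its changes only on success.
fn run_transactionally<R, E>(f: impl FnOnce() -> Result<R, E>) -> Result<R, E> {
    with_transaction(|| {
        let outcome = f();
        match &outcome {
            // Persist all storage writes made by `f`.
            Ok(_) => TransactionOutcome::Commit(outcome),
            // Discard storage writes; only the gas meter changes survive outside.
            Err(_) => TransactionOutcome::Rollback(outcome),
        }
    })
}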
@@ -772,17 +770,10 @@ where ); } - let nested_meter = &mut self.frames - .last_mut() - .unwrap_or(&mut self.first_frame) - .nested_meter; - let (frame, executable) = Self::new_frame( - frame_args, - value_transferred, - nested_meter, - gas_limit, - self.schedule, - )?; + let nested_meter = + &mut self.frames.last_mut().unwrap_or(&mut self.first_frame).nested_meter; + let (frame, executable) = + Self::new_frame(frame_args, value_transferred, nested_meter, gas_limit, self.schedule)?; self.frames.push(frame); Ok(executable) } @@ -790,11 +781,7 @@ where /// Run the current (top) frame. /// /// This can be either a call or an instantiate. - fn run( - &mut self, - executable: E, - input_data: Vec - ) -> Result { + fn run(&mut self, executable: E, input_data: Vec) -> Result { let entry_point = self.top_frame().entry_point; let do_transaction = || { // Cache the value before calling into the constructor because that @@ -807,11 +794,9 @@ where self.initial_transfer()?; // Call into the wasm blob. - let output = executable.execute( - self, - &entry_point, - input_data, - ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; + let output = executable + .execute(self, &entry_point, input_data) + .map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; // Additional work needs to be performed in case of an instantiation. if output.is_success() && entry_point == ExportedFunction::Constructor { @@ -820,7 +805,7 @@ where // It is not allowed to terminate a contract inside its constructor. if let CachedContract::Terminated = frame.contract_info { - return Err(Error::::TerminatedInConstructor.into()); + return Err(Error::::TerminatedInConstructor.into()) } // Collect the rent for the first block to prevent the creation of very large @@ -828,16 +813,13 @@ where // This also makes sure that it is above the subsistence threshold // in order to keep up the guarantuee that we always leave a tombstone behind // with the exception of a contract that called `seal_terminate`. - let contract = Rent:: - ::charge(&account_id, frame.invalidate(), occupied_storage)? - .ok_or(Error::::NewContractNotFunded)?; + let contract = + Rent::::charge(&account_id, frame.invalidate(), occupied_storage)? + .ok_or(Error::::NewContractNotFunded)?; frame.contract_info = CachedContract::Cached(contract); // Deposit an instantiation event. - deposit_event::(vec![], Event::Instantiated( - self.caller().clone(), - account_id, - )); + deposit_event::(vec![], Event::Instantiated(self.caller().clone(), account_id)); } Ok(output) @@ -849,9 +831,7 @@ where let (success, output) = with_transaction(|| { let output = do_transaction(); match &output { - Ok(result) if result.is_success() => { - TransactionOutcome::Commit((true, output)) - }, + Ok(result) if result.is_success() => TransactionOutcome::Commit((true, output)), _ => TransactionOutcome::Rollback((false, output)), } }); @@ -880,7 +860,7 @@ where prev.nested_meter.absorb_nested(frame.nested_meter); // Only gas counter changes are persisted in case of a failure. if !persist { - return; + return } if let CachedContract::Cached(contract) = frame.contract_info { // optimization: Predecessor is the same contract. @@ -889,7 +869,7 @@ where // trigger a rollback. 
if prev.account_id == *account_id { prev.contract_info = CachedContract::Cached(contract); - return; + return } // Predecessor is a different contract: We persist the info and invalidate the first @@ -914,12 +894,12 @@ where self.gas_meter.absorb_nested(mem::take(&mut self.first_frame.nested_meter)); // Only gas counter changes are persisted in case of a failure. if !persist { - return; + return } if let CachedContract::Cached(contract) = &self.first_frame.contract_info { >::insert( &self.first_frame.account_id, - ContractInfo::Alive(contract.clone()) + ContractInfo::Alive(contract.clone()), ); } if let Some(counter) = self.account_counter { @@ -942,7 +922,7 @@ where value: BalanceOf, ) -> DispatchResult { if value == 0u32.into() { - return Ok(()); + return Ok(()) } let existence_requirement = match (allow_death, sender_is_contract) { @@ -974,16 +954,10 @@ where // we can error out early. This avoids executing the constructor in cases where // we already know that the contract has too little balance. if frame.entry_point == ExportedFunction::Constructor && value < subsistence_threshold { - return Err(>::NewContractNotFunded.into()); + return Err(>::NewContractNotFunded.into()) } - Self::transfer( - self.caller_is_origin(), - false, - self.caller(), - &frame.account_id, - value, - ) + Self::transfer(self.caller_is_origin(), false, self.caller(), &frame.account_id, value) } /// Wether the caller is the initiator of the call stack. @@ -1004,17 +978,13 @@ where /// Iterator over all frames. /// /// The iterator starts with the top frame and ends with the root frame. - fn frames(&self) -> impl Iterator> { - sp_std::iter::once(&self.first_frame) - .chain(&self.frames) - .rev() + fn frames(&self) -> impl Iterator> { + sp_std::iter::once(&self.first_frame).chain(&self.frames).rev() } /// Same as `frames` but with a mutable reference as iterator item. - fn frames_mut(&mut self) -> impl Iterator> { - sp_std::iter::once(&mut self.first_frame) - .chain(&mut self.frames) - .rev() + fn frames_mut(&mut self) -> impl Iterator> { + sp_std::iter::once(&mut self.first_frame).chain(&mut self.frames).rev() } /// Returns whether the current contract is on the stack multiple times. @@ -1068,7 +1038,7 @@ where let try_call = || { if !self.allows_reentry(&to) { - return Err(>::ReentranceDenied.into()); + return Err(>::ReentranceDenied.into()) } // We ignore instantiate frames in our search for a cached contract. 
// Otherwise it would be possible to recursively call a contract from its own @@ -1076,17 +1046,12 @@ where let cached_info = self .frames() .find(|f| f.entry_point == ExportedFunction::Call && f.account_id == to) - .and_then(|f| { - match &f.contract_info { - CachedContract::Cached(contract) => Some(contract.clone()), - _ => None, - } + .and_then(|f| match &f.contract_info { + CachedContract::Cached(contract) => Some(contract.clone()), + _ => None, }); - let executable = self.push_frame( - FrameArgs::Call{dest: to, cached_info}, - value, - gas_limit - )?; + let executable = + self.push_frame(FrameArgs::Call { dest: to, cached_info }, value, gas_limit)?; self.run(executable, input_data) }; @@ -1125,7 +1090,7 @@ where fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { if self.is_recursive() { - return Err(Error::::TerminatedWhileReentrant.into()); + return Err(Error::::TerminatedWhileReentrant.into()) } let frame = self.top_frame_mut(); let info = frame.terminate(); @@ -1139,9 +1104,10 @@ where )?; ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash, &mut frame.nested_meter)?; - Contracts::::deposit_event( - Event::Terminated(frame.account_id.clone(), beneficiary.clone()), - ); + Contracts::::deposit_event(Event::Terminated( + frame.account_id.clone(), + beneficiary.clone(), + )); Ok(()) } @@ -1153,7 +1119,7 @@ where delta: Vec, ) -> Result<(), DispatchError> { if self.is_recursive() { - return Err(Error::::TerminatedWhileReentrant.into()); + return Err(Error::::TerminatedWhileReentrant.into()) } let frame = self.top_frame_mut(); let origin_contract = frame.contract_info().clone(); @@ -1170,23 +1136,14 @@ where if let Ok(_) = result { deposit_event::( vec![], - Event::Restored( - account_id, - dest, - code_hash, - rent_allowance, - ), + Event::Restored(account_id, dest, code_hash, rent_allowance), ); frame.terminate(); } result } - fn transfer( - &mut self, - to: &T::AccountId, - value: BalanceOf, - ) -> DispatchResult { + fn transfer(&mut self, to: &T::AccountId, value: BalanceOf) -> DispatchResult { Self::transfer(true, false, &self.top_frame().account_id, to, value) } @@ -1197,9 +1154,7 @@ where fn set_storage(&mut self, key: StorageKey, value: Option>) -> DispatchResult { let block_number = self.block_number; let frame = self.top_frame_mut(); - Storage::::write( - block_number, frame.contract_info(), &key, value, - ) + Storage::::write(block_number, frame.contract_info(), &key, value) } fn address(&self) -> &T::AccountId { @@ -1237,7 +1192,7 @@ where fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - Event::ContractEmitted(self.top_frame().account_id.clone(), data) + Event::ContractEmitted(self.top_frame().account_id.clone(), data), ); } @@ -1249,7 +1204,9 @@ where self.top_frame_mut().contract_info().rent_allowance } - fn block_number(&self) -> T::BlockNumber { self.block_number } + fn block_number(&self) -> T::BlockNumber { + self.block_number + } fn max_value_size(&self) -> u32 { T::Schedule::get().limits.payload_len @@ -1303,10 +1260,7 @@ where } } -fn deposit_event( - topics: Vec, - event: Event, -) { +fn deposit_event(topics: Vec, event: Event) { >::deposit_event_indexed( &*topics, ::Event::from(event).into(), @@ -1336,24 +1290,27 @@ mod sealing { mod tests { use super::*; use crate::{ + exec::ExportedFunction::*, gas::GasMeter, storage::Storage, tests::{ - ALICE, BOB, CHARLIE, Call, TestFilter, ExtBuilder, Test, Event as MetaEvent, - test_utils::{place_contract, set_balance, get_balance}, + 
test_utils::{get_balance, place_contract, set_balance}, + Call, Event as MetaEvent, ExtBuilder, Test, TestFilter, ALICE, BOB, CHARLIE, }, - exec::ExportedFunction::*, Error, Weight, }; - use codec::{Encode, Decode}; - use sp_core::Bytes; - use sp_runtime::{DispatchError, traits::{BadOrigin, Hash}}; use assert_matches::assert_matches; - use std::{cell::RefCell, collections::HashMap, rc::Rc}; - use pretty_assertions::{assert_eq, assert_ne}; - use pallet_contracts_primitives::ReturnFlags; - use frame_support::{assert_ok, assert_err}; + use codec::{Decode, Encode}; + use frame_support::{assert_err, assert_ok}; use frame_system::{EventRecord, Phase}; + use pallet_contracts_primitives::ReturnFlags; + use pretty_assertions::{assert_eq, assert_ne}; + use sp_core::Bytes; + use sp_runtime::{ + traits::{BadOrigin, Hash}, + DispatchError, + }; + use std::{cell::RefCell, collections::HashMap, rc::Rc}; type System = frame_system::Pallet; @@ -1404,12 +1361,15 @@ mod tests { // Generate code hashes as monotonically increasing values. let hash = ::Hash::from_low_u64_be(loader.counter); loader.counter += 1; - loader.map.insert(hash, MockExecutable { - func: Rc::new(f), - func_type, - code_hash: hash.clone(), - refcount: 1, - }); + loader.map.insert( + hash, + MockExecutable { + func: Rc::new(f), + func_type, + code_hash: hash.clone(), + refcount: 1, + }, + ); hash }) } @@ -1417,7 +1377,8 @@ mod tests { fn increment_refcount(code_hash: CodeHash) { LOADER.with(|loader| { let mut loader = loader.borrow_mut(); - loader.map + loader + .map .entry(code_hash) .and_modify(|executable| executable.refcount += 1) .or_insert_with(|| panic!("code_hash does not exist")); @@ -1442,12 +1403,7 @@ mod tests { fn refcount(code_hash: &CodeHash) -> u32 { LOADER.with(|loader| { - loader - .borrow() - .map - .get(code_hash) - .expect("code_hash does not exist") - .refcount() + loader.borrow().map.get(code_hash).expect("code_hash does not exist").refcount() }) } } @@ -1463,7 +1419,8 @@ mod tests { fn from_storage_noinstr(code_hash: CodeHash) -> Result { LOADER.with(|loader| { - loader.borrow_mut() + loader + .borrow_mut() .map .get(&code_hash) .cloned() @@ -1475,16 +1432,18 @@ mod tests { MockLoader::decrement_refcount(self.code_hash); } - fn add_user(code_hash: CodeHash, _: &mut GasMeter) - -> Result<(), DispatchError> - { + fn add_user( + code_hash: CodeHash, + _: &mut GasMeter, + ) -> Result<(), DispatchError> { MockLoader::increment_refcount(code_hash); Ok(()) } - fn remove_user(code_hash: CodeHash, _: &mut GasMeter) - -> Result<(), DispatchError> - { + fn remove_user( + code_hash: CodeHash, + _: &mut GasMeter, + ) -> Result<(), DispatchError> { MockLoader::decrement_refcount(code_hash); Ok(()) } @@ -1499,10 +1458,7 @@ mod tests { MockLoader::increment_refcount(self.code_hash); } if function == &self.func_type { - (self.func)(MockCtx { - ext, - input_data, - }, &self) + (self.func)(MockCtx { ext, input_data }, &self) } else { exec_success() } @@ -1551,9 +1507,7 @@ mod tests { place_contract(&BOB, exec_ch); assert_matches!( - MockStack::run_call( - ALICE, BOB, &mut gas_meter, &schedule, value, vec![], None, - ), + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, value, vec![], None,), Ok(_) ); }); @@ -1572,13 +1526,7 @@ mod tests { set_balance(&origin, 100); set_balance(&dest, 0); - MockStack::transfer( - true, - false, - &origin, - &dest, - 55, - ).unwrap(); + MockStack::transfer(true, false, &origin, &dest, 55).unwrap(); assert_eq!(get_balance(&origin), 45); assert_eq!(get_balance(&dest), 55); @@ -1592,10 +1540,9 @@ 
mod tests { let origin = ALICE; let dest = BOB; - let return_ch = MockLoader::insert( - Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) + }); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -1611,7 +1558,8 @@ mod tests { 55, vec![], None, - ).unwrap(); + ) + .unwrap(); assert!(!output.is_success()); assert_eq!(get_balance(&origin), 100); @@ -1631,18 +1579,9 @@ mod tests { ExtBuilder::default().build().execute_with(|| { set_balance(&origin, 0); - let result = MockStack::transfer( - false, - false, - &origin, - &dest, - 100, - ); + let result = MockStack::transfer(false, false, &origin, &dest, 100); - assert_eq!( - result, - Err(Error::::TransferFailed.into()) - ); + assert_eq!(result, Err(Error::::TransferFailed.into())); assert_eq!(get_balance(&origin), 0); assert_eq!(get_balance(&dest), 0); }); @@ -1654,10 +1593,9 @@ mod tests { // is returned from the execution context. let origin = ALICE; let dest = BOB; - let return_ch = MockLoader::insert( - Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) + }); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -1685,10 +1623,9 @@ mod tests { // is returned from the execution context. let origin = ALICE; let dest = BOB; - let return_ch = MockLoader::insert( - Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) - ); + let return_ch = MockLoader::insert(Call, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) + }); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -1747,9 +1684,8 @@ mod tests { let schedule = ::Schedule::get(); let subsistence = Contracts::::subsistence_threshold(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - input_data_ch, &schedule, &mut gas_meter - ).unwrap(); + let executable = + MockExecutable::from_storage(input_data_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, subsistence * 10); @@ -1784,10 +1720,7 @@ mod tests { if !*reached_bottom { // We are here for the first time, which means we just reached the bottom. // Verify that we've got proper error and set `reached_bottom`. - assert_eq!( - r, - Err(Error::::MaxCallDepthReached.into()) - ); + assert_eq!(r, Err(Error::::MaxCallDepthReached.into())); *reached_bottom = true; } else { // We are just unwinding the stack here. @@ -1829,22 +1762,17 @@ mod tests { let bob_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for bob. - WITNESSED_CALLER_BOB.with(|caller| - *caller.borrow_mut() = Some(ctx.ext.caller().clone()) - ); + WITNESSED_CALLER_BOB + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); // Call into CHARLIE contract. - assert_matches!( - ctx.ext.call(0, CHARLIE, 0, vec![], true), - Ok(_) - ); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { // Record the caller for charlie.
- WITNESSED_CALLER_CHARLIE.with(|caller| - *caller.borrow_mut() = Some(ctx.ext.caller().clone()) - ); + WITNESSED_CALLER_CHARLIE + .with(|caller| *caller.borrow_mut() = Some(ctx.ext.caller().clone())); exec_success() }); @@ -1877,10 +1805,7 @@ mod tests { assert_eq!(*ctx.ext.address(), BOB); // Call into charlie contract. - assert_matches!( - ctx.ext.call(0, CHARLIE, 0, vec![], true), - Ok(_) - ); + assert_matches!(ctx.ext.call(0, CHARLIE, 0, vec![], true), Ok(_)); exec_success() }); let charlie_ch = MockLoader::insert(Call, |ctx, _| { @@ -1914,9 +1839,8 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = ::Schedule::get(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - dummy_ch, &schedule, &mut gas_meter - ).unwrap(); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); assert_matches!( MockStack::run_instantiate( @@ -1936,17 +1860,15 @@ mod tests { #[test] fn instantiation_work_with_success_output() { - let dummy_ch = MockLoader::insert( - Constructor, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) - ); + let dummy_ch = MockLoader::insert(Constructor, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) + }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = ::Schedule::get(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - dummy_ch, &schedule, &mut gas_meter - ).unwrap(); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( @@ -1965,26 +1887,25 @@ mod tests { // Check that the newly created account has the expected code hash and // there is an instantiation event. - assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&events(), &[ - Event::Instantiated(ALICE, instantiated_contract_address) - ]); + assert_eq!( + Storage::::code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!(&events(), &[Event::Instantiated(ALICE, instantiated_contract_address)]); }); } #[test] fn instantiation_fails_with_failing_output() { - let dummy_ch = MockLoader::insert( - Constructor, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) - ); + let dummy_ch = MockLoader::insert(Constructor, |_, _| { + Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) + }); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = ::Schedule::get(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - dummy_ch, &schedule, &mut gas_meter - ).unwrap(); + let executable = + MockExecutable::from_storage(dummy_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( @@ -2016,13 +1937,16 @@ mod tests { let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx, _| { // Instantiate a contract and save its address in `instantiated_contract_address`.
- let (address, output) = ctx.ext.instantiate( - 0, - dummy_ch, - Contracts::::subsistence_threshold() * 3, - vec![], - &[48, 49, 50], - ).unwrap(); + let (address, output) = ctx + .ext + .instantiate( + 0, + dummy_ch, + Contracts::::subsistence_threshold() * 3, + vec![], + &[48, 49, 50], + ) + .unwrap(); *instantiated_contract_address.borrow_mut() = address.into(); Ok(output) @@ -2036,27 +1960,33 @@ mod tests { assert_matches!( MockStack::run_call( - ALICE, BOB, &mut GasMeter::::new(GAS_LIMIT), &schedule, 20, vec![], None, + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 20, + vec![], + None, ), Ok(_) ); - let instantiated_contract_address = instantiated_contract_address.borrow().as_ref().unwrap().clone(); + let instantiated_contract_address = + instantiated_contract_address.borrow().as_ref().unwrap().clone(); // Check that the newly created account has the expected code hash and // there is an instantiation event. - assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&events(), &[ - Event::Instantiated(BOB, instantiated_contract_address) - ]); + assert_eq!( + Storage::::code_hash(&instantiated_contract_address).unwrap(), + dummy_ch + ); + assert_eq!(&events(), &[Event::Instantiated(BOB, instantiated_contract_address)]); }); } #[test] fn instantiation_traps() { - let dummy_ch = MockLoader::insert(Constructor, - |_, _| Err("It's a trap!".into()) - ); + let dummy_ch = MockLoader::insert(Constructor, |_, _| Err("It's a trap!".into())); let instantiator_ch = MockLoader::insert(Call, { let dummy_ch = dummy_ch.clone(); move |ctx, _| { @@ -2087,7 +2017,13 @@ mod tests { assert_matches!( MockStack::run_call( - ALICE, BOB, &mut GasMeter::::new(GAS_LIMIT), &schedule, 20, vec![], None, + ALICE, + BOB, + &mut GasMeter::::new(GAS_LIMIT), + &schedule, + 20, + vec![], + None, ), Ok(_) ); @@ -2105,36 +2041,29 @@ mod tests { exec_success() }); - ExtBuilder::default() - .existential_deposit(15) - .build() - .execute_with(|| { - let schedule = ::Schedule::get(); - let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - terminate_ch, &schedule, &mut gas_meter - ).unwrap(); - set_balance(&ALICE, 1000); - - assert_eq!( - MockStack::run_instantiate( - ALICE, - executable, - &mut gas_meter, - &schedule, - 100, - vec![], - &[], - None, - ), - Err(Error::::TerminatedInConstructor.into()) - ); + ExtBuilder::default().existential_deposit(15).build().execute_with(|| { + let schedule = ::Schedule::get(); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = + MockExecutable::from_storage(terminate_ch, &schedule, &mut gas_meter).unwrap(); + set_balance(&ALICE, 1000); - assert_eq!( - &events(), - &[] - ); - }); + assert_eq!( + MockStack::run_instantiate( + ALICE, + executable, + &mut gas_meter, + &schedule, + 100, + vec![], + &[], + None, + ), + Err(Error::::TerminatedInConstructor.into()) + ); + + assert_eq!(&events(), &[]); + }); } #[test] @@ -2152,9 +2081,8 @@ mod tests { let subsistence = Contracts::::subsistence_threshold(); let schedule = ::Schedule::get(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - rent_allowance_ch, &schedule, &mut gas_meter - ).unwrap(); + let executable = + MockExecutable::from_storage(rent_allowance_ch, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, subsistence * 10); let result = MockStack::run_instantiate( @@ -2175,9 +2103,8 @@ mod tests { fn rent_params_works() { let code_hash = MockLoader::insert(Call,
|ctx, executable| { let address = ctx.ext.address(); - let contract = >::get(address) - .and_then(|c| c.get_alive()) - .unwrap(); + let contract = + >::get(address).and_then(|c| c.get_alive()).unwrap(); assert_eq!(ctx.ext.rent_params(), &RentParams::new(address, &0, &contract, executable)); exec_success() }); @@ -2188,15 +2115,7 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); set_balance(&ALICE, subsistence * 10); place_contract(&BOB, code_hash); - MockStack::run_call( - ALICE, - BOB, - &mut gas_meter, - &schedule, - 0, - vec![], - None, - ).unwrap(); + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); }); } @@ -2205,9 +2124,8 @@ mod tests { let code_hash = MockLoader::insert(Call, |ctx, executable| { let subsistence = Contracts::::subsistence_threshold(); let address = ctx.ext.address(); - let contract = >::get(address) - .and_then(|c| c.get_alive()) - .unwrap(); + let contract = + >::get(address).and_then(|c| c.get_alive()).unwrap(); let rent_params = RentParams::new(address, &0, &contract, executable); // Changing the allowance during the call: rent params stay unchanged. @@ -2219,13 +2137,9 @@ mod tests { // Creating another instance from the same code_hash increases the refcount. // This is also not reflected in the rent params. assert_eq!(MockLoader::refcount(&executable.code_hash), 1); - ctx.ext.instantiate( - 0, - executable.code_hash, - subsistence * 25, - vec![], - &[], - ).unwrap(); + ctx.ext + .instantiate(0, executable.code_hash, subsistence * 25, vec![], &[]) + .unwrap(); assert_eq!(MockLoader::refcount(&executable.code_hash), 2); assert_eq!(ctx.ext.rent_params(), &rent_params); @@ -2246,31 +2160,38 @@ mod tests { subsistence * 50, vec![], None, - ).unwrap(); + ) + .unwrap(); }); } #[test] fn rent_status_works() { let code_hash = MockLoader::insert(Call, |ctx, _| { - assert_eq!(ctx.ext.rent_status(0), RentStatus { - max_deposit: 80000, - current_deposit: 80000, - custom_refcount_deposit: None, - max_rent: 32, - current_rent: 32, - custom_refcount_rent: None, - _reserved: None, - }); - assert_eq!(ctx.ext.rent_status(1), RentStatus { - max_deposit: 80000, - current_deposit: 80000, - custom_refcount_deposit: Some(80000), - max_rent: 32, - current_rent: 32, - custom_refcount_rent: Some(32), - _reserved: None, - }); + assert_eq!( + ctx.ext.rent_status(0), + RentStatus { + max_deposit: 80000, + current_deposit: 80000, + custom_refcount_deposit: None, + max_rent: 32, + current_rent: 32, + custom_refcount_rent: None, + _reserved: None, + } + ); + assert_eq!( + ctx.ext.rent_status(1), + RentStatus { + max_deposit: 80000, + current_deposit: 80000, + custom_refcount_deposit: Some(80000), + max_rent: 32, + current_rent: 32, + custom_refcount_rent: Some(32), + _reserved: None, + } + ); exec_success() }); @@ -2280,15 +2201,7 @@ mod tests { let mut gas_meter = GasMeter::::new(GAS_LIMIT); set_balance(&ALICE, subsistence * 10); place_contract(&BOB, code_hash); - MockStack::run_call( - ALICE, - BOB, - &mut gas_meter, - &schedule, - 0, - vec![], - None, - ).unwrap(); + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); }); } @@ -2308,10 +2221,7 @@ mod tests { let changed_allowance = >::max_value() / 2; assert_ne!(original_allowance, changed_allowance); ctx.ext.set_rent_allowance(changed_allowance); - assert_eq!( - ctx.ext.call(0, CHARLIE, 0, vec![], true), - exec_trapped() - ); + assert_eq!(ctx.ext.call(0, CHARLIE, 0, vec![], true), exec_trapped()); assert_eq!(ctx.ext.rent_allowance(), changed_allowance); 
assert_ne!(ctx.ext.rent_allowance(), original_allowance); } @@ -2356,9 +2266,7 @@ mod tests { let schedule = ::Schedule::get(); let subsistence = Contracts::::subsistence_threshold(); let mut gas_meter = GasMeter::::new(GAS_LIMIT); - let executable = MockExecutable::from_storage( - code, &schedule, &mut gas_meter - ).unwrap(); + let executable = MockExecutable::from_storage(code, &schedule, &mut gas_meter).unwrap(); set_balance(&ALICE, subsistence * 10); @@ -2400,7 +2308,8 @@ mod tests { 0, vec![], Some(&mut debug_buffer), - ).unwrap(); + ) + .unwrap(); }); assert_eq!(&String::from_utf8(debug_buffer).unwrap(), "This is a testMore text"); @@ -2445,9 +2354,7 @@ mod tests { ctx.ext.call(0, dest, 0, vec![], false) }); - let code_charlie = MockLoader::insert(Call, |_, _| { - exec_success() - }); + let code_charlie = MockLoader::insert(Call, |_, _| exec_success()); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2475,7 +2382,8 @@ mod tests { 0, BOB.encode(), None, - ).map_err(|e| e.error), + ) + .map_err(|e| e.error), >::ReentranceDenied, ); }); @@ -2492,9 +2400,8 @@ mod tests { }); // call BOB with input set to '1' - let code_charlie = MockLoader::insert(Call, |ctx, _| { - ctx.ext.call(0, BOB, 0, vec![1], true) - }); + let code_charlie = + MockLoader::insert(Call, |ctx, _| ctx.ext.call(0, BOB, 0, vec![1], true)); ExtBuilder::default().build().execute_with(|| { let schedule = ::Schedule::get(); @@ -2511,7 +2418,8 @@ mod tests { 0, vec![0], None, - ).map_err(|e| e.error), + ) + .map_err(|e| e.error), >::ReentranceDenied, ); }); @@ -2532,24 +2440,17 @@ mod tests { set_balance(&ALICE, subsistence * 10); place_contract(&BOB, code_hash); System::reset_events(); - MockStack::run_call( - ALICE, - BOB, - &mut gas_meter, - &schedule, - 0, - vec![], - None, - ).unwrap(); + MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); let remark_hash = ::Hashing::hash(b"Hello World"); - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Initialization, event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), topics: vec![], - }, - ]); + },] + ); }); } #[test] @@ -2567,17 +2468,14 @@ mod tests { let forbidden_call = Call::Balances(BalanceCall::transfer(CHARLIE, 22)); // simple cases: direct call - assert_err!( - ctx.ext.call_runtime(forbidden_call.clone()), - BadOrigin, - ); + assert_err!(ctx.ext.call_runtime(forbidden_call.clone()), BadOrigin,); // as part of a batch: return is OK (but it interrupted the batch) - assert_ok!( - ctx.ext.call_runtime(Call::Utility(UtilCall::batch(vec![ - allowed_call.clone(), forbidden_call, allowed_call - ]))), - ); + assert_ok!(ctx.ext.call_runtime(Call::Utility(UtilCall::batch(vec![ + allowed_call.clone(), + forbidden_call, + allowed_call + ]))),); // the transfer wasn't performed assert_eq!(get_balance(&CHARLIE), 0); @@ -2585,11 +2483,9 @@ mod tests { exec_success() }); - TestFilter::set_filter(|call| { - match call { - Call::Balances(pallet_balances::Call::transfer(_, _)) => false, - _ => true, - } + TestFilter::set_filter(|call| match call { + Call::Balances(pallet_balances::Call::transfer(_, _)) => false, + _ => true, }); ExtBuilder::default().build().execute_with(|| { @@ -2599,31 +2495,27 @@ mod tests { set_balance(&ALICE, subsistence * 10); place_contract(&BOB, code_hash); System::reset_events(); - MockStack::run_call( - ALICE, - BOB, - &mut gas_meter, - &schedule, - 0, - vec![], - None, - ).unwrap(); +
MockStack::run_call(ALICE, BOB, &mut gas_meter, &schedule, 0, vec![], None).unwrap(); let remark_hash = ::Hashing::hash(b"Hello"); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::Utility( - pallet_utility::Event::BatchInterrupted(1, BadOrigin.into()), - ), - topics: vec![], - }, - ]); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted( + 1, + BadOrigin.into() + ),), + topics: vec![], + }, + ] + ); }); } } diff --git a/substrate/frame/contracts/src/gas.rs b/substrate/frame/contracts/src/gas.rs index 64f410c4cef2bcf3b33a543c22d14f06f2dc5b56..38d18c1e24c190dcc1f1da33e55e8bf43c9df275 100644 --- a/substrate/frame/contracts/src/gas.rs +++ b/substrate/frame/contracts/src/gas.rs @@ -15,17 +15,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Config, Error, exec::ExecError}; -use sp_std::marker::PhantomData; -use sp_runtime::traits::Zero; +use crate::{exec::ExecError, Config, Error}; use frame_support::{ dispatch::{ - DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, DispatchError, + DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo, PostDispatchInfo, }, weights::Weight, DefaultNoBound, }; use sp_core::crypto::UncheckedFrom; +use sp_runtime::traits::Zero; +use sp_std::marker::PhantomData; #[cfg(test)] use std::{any::Any, fmt::Debug}; @@ -88,7 +88,7 @@ pub struct GasMeter { impl GasMeter where - T::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]> + T::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]>, { pub fn new(gas_limit: Weight) -> Self { GasMeter { @@ -107,11 +107,7 @@ where /// /// Passing `0` as amount is interpreted as "all remaining gas". pub fn nested(&mut self, amount: Weight) -> Result { - let amount = if amount == 0 { - self.gas_left - } else { - amount - }; + let amount = if amount == 0 { self.gas_left } else { amount }; // NOTE that it is ok to allocate all available gas since it still ensured // by `charge` that it doesn't reach zero. @@ -155,10 +151,8 @@ where #[cfg(test)] { // Unconditionally add the token to the storage. - let erased_tok = ErasedToken { - description: format!("{:?}", token), - token: Box::new(token), - }; + let erased_tok = + ErasedToken { description: format!("{:?}", token), token: Box::new(token) }; self.tokens.push(erased_tok); } @@ -277,7 +271,9 @@ mod tests { #[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); impl Token for SimpleToken { - fn weight(&self) -> u64 { self.0 } + fn weight(&self) -> u64 { + self.0 + } } #[test] @@ -318,7 +314,6 @@ mod tests { assert!(gas_meter.charge(SimpleToken(1)).is_err()); } - // Charging the exact amount that the user paid for should be // possible. #[test] diff --git a/substrate/frame/contracts/src/lib.rs b/substrate/frame/contracts/src/lib.rs index 116ca6ce18881dfc1346110e1e58f180b0777b78..a3a3311fa9beec951c299c63491ab61289a97a5a 100644 --- a/substrate/frame/contracts/src/lib.rs +++ b/substrate/frame/contracts/src/lib.rs @@ -78,17 +78,17 @@ //! WebAssembly based smart contracts in the Rust programming language. This is a work in progress. 
#![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "runtime-benchmarks", recursion_limit="512")] +#![cfg_attr(feature = "runtime-benchmarks", recursion_limit = "512")] #[macro_use] mod gas; -mod storage; +mod benchmarking; mod exec; -mod wasm; +mod migration; mod rent; -mod benchmarking; mod schedule; -mod migration; +mod storage; +mod wasm; pub mod chain_extension; pub mod weights; @@ -97,49 +97,48 @@ pub mod weights; mod tests; pub use crate::{ - pallet::*, - schedule::{Schedule, Limits, InstructionWeights, HostFnWeights}, exec::Frame, + pallet::*, + schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, }; use crate::{ + exec::{Executable, Stack as ExecStack}, gas::GasMeter, - exec::{Stack as ExecStack, Executable}, rent::Rent, - storage::{Storage, DeletedContract, ContractInfo, AliveContractInfo, TombstoneContractInfo}, - weights::WeightInfo, + storage::{AliveContractInfo, ContractInfo, DeletedContract, Storage, TombstoneContractInfo}, wasm::PrefabWasmModule, -}; -use sp_core::{Bytes, crypto::UncheckedFrom}; -use sp_std::prelude::*; -use sp_runtime::{ - traits::{ - Hash, StaticLookup, Convert, Saturating, Zero, - }, - Perbill, + weights::WeightInfo, }; use frame_support::{ - traits::{OnUnbalanced, Currency, Get, Time, Randomness, Filter}, - weights::{Weight, PostDispatchInfo, WithPostDispatchInfo, GetDispatchInfo}, dispatch::Dispatchable, + traits::{Currency, Filter, Get, OnUnbalanced, Randomness, Time}, + weights::{GetDispatchInfo, PostDispatchInfo, Weight, WithPostDispatchInfo}, }; use frame_system::Pallet as System; use pallet_contracts_primitives::{ - RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, - ContractInstantiateResult, Code, InstantiateReturnValue, + Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, GetStorageResult, + InstantiateReturnValue, RentProjectionResult, +}; +use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_runtime::{ + traits::{Convert, Hash, Saturating, StaticLookup, Zero}, + Perbill, }; +use sp_std::prelude::*; type CodeHash = ::Hash; type TrieId = Vec; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { @@ -156,11 +155,10 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The overarching call type. - type Call: - Dispatchable + - GetDispatchInfo + - codec::Decode + - IsType<::Call>; + type Call: Dispatchable + + GetDispatchInfo + + codec::Decode + + IsType<::Call>; /// Filter that is applied to calls dispatched by contracts. /// @@ -263,7 +261,7 @@ pub mod pallet { /// The allowed depth is `CallStack::size() + 1`. /// Therefore a size of `0` means that a contract cannot use call or instantiate. /// In other words only the origin called "root contract" is allowed to execute then. - type CallStack: smallvec::Array>; + type CallStack: smallvec::Array>; /// The maximum number of tries that can be queued for deletion. #[pallet::constant] @@ -286,7 +284,8 @@ pub mod pallet { fn on_initialize(_block: T::BlockNumber) -> Weight { // We do not want to go above the block limit and rather avoid lazy deletion // in that case. This should only happen on runtime upgrades. 
- let weight_limit = T::BlockWeights::get().max_block + let weight_limit = T::BlockWeights::get() + .max_block .saturating_sub(System::::block_weight().total()) .min(T::DeletionWeightLimit::get()); Storage::::process_deletion_queue_batch(weight_limit) @@ -317,14 +316,20 @@ pub mod pallet { dest: ::Source, #[pallet::compact] value: BalanceOf, #[pallet::compact] gas_limit: Weight, - data: Vec + data: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut gas_meter = GasMeter::new(gas_limit); let schedule = T::Schedule::get(); let result = ExecStack::>::run_call( - origin, dest, &mut gas_meter, &schedule, value, data, None, + origin, + dest, + &mut gas_meter, + &schedule, + value, + data, + None, ); gas_meter.into_dispatch_result(result, T::WeightInfo::call()) } @@ -374,11 +379,19 @@ pub mod pallet { let code_len = executable.code_len(); ensure!(code_len <= T::Schedule::get().limits.code_len, Error::::CodeTooLarge); let result = ExecStack::>::run_instantiate( - origin, executable, &mut gas_meter, &schedule, endowment, data, &salt, None, - ).map(|(_address, output)| output); + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + None, + ) + .map(|(_address, output)| output); gas_meter.into_dispatch_result( result, - T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024) + T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024), ) } @@ -403,12 +416,18 @@ pub mod pallet { let schedule = T::Schedule::get(); let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; let result = ExecStack::>::run_instantiate( - origin, executable, &mut gas_meter, &schedule, endowment, data, &salt, None, - ).map(|(_address, output)| output); - gas_meter.into_dispatch_result( - result, - T::WeightInfo::instantiate(salt.len() as u32 / 1024), + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + None, ) + .map(|(_address, output)| output); + gas_meter + .into_dispatch_result(result, T::WeightInfo::instantiate(salt.len() as u32 / 1024)) } /// Allows block producers to claim a small reward for evicting a contract. If a block @@ -424,44 +443,33 @@ pub mod pallet { pub fn claim_surcharge( origin: OriginFor, dest: T::AccountId, - aux_sender: Option + aux_sender: Option, ) -> DispatchResultWithPostInfo { let origin = origin.into(); let (signed, rewarded) = match (origin, aux_sender) { - (Ok(frame_system::RawOrigin::Signed(account)), None) => { - (true, account) - }, - (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => { - (false, aux_sender) - }, + (Ok(frame_system::RawOrigin::Signed(account)), None) => (true, account), + (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => (false, aux_sender), _ => Err(Error::::InvalidSurchargeClaim)?, }; // Add some advantage for block producers (who send unsigned extrinsics) by // adding a handicap: for signed extrinsics we use a slightly older block number // for the eviction check. This can be viewed as if we pushed regular users back into the past. - let handicap = if signed { - T::SignedClaimHandicap::get() - } else { - Zero::zero() - }; + let handicap = if signed { T::SignedClaimHandicap::get() } else { Zero::zero() }; // If poking the contract has led to eviction of the contract, give out the rewards. match Rent::>::try_eviction(&dest, handicap)?
{ - (Some(rent_paid), code_len) => { - T::Currency::deposit_into_existing( - &rewarded, - T::SurchargeReward::get().min(rent_paid), - ) - .map(|_| PostDispatchInfo { - actual_weight: Some(T::WeightInfo::claim_surcharge(code_len / 1024)), - pays_fee: Pays::No, - }) - .map_err(Into::into) - } - (None, code_len) => Err(Error::::ContractNotEvictable.with_weight( - T::WeightInfo::claim_surcharge(code_len / 1024) - )), + (Some(rent_paid), code_len) => T::Currency::deposit_into_existing( + &rewarded, + T::SurchargeReward::get().min(rent_paid), + ) + .map(|_| PostDispatchInfo { + actual_weight: Some(T::WeightInfo::claim_surcharge(code_len / 1024)), + pays_fee: Pays::No, + }) + .map_err(Into::into), + (None, code_len) => Err(Error::::ContractNotEvictable + .with_weight(T::WeightInfo::claim_surcharge(code_len / 1024))), } } } @@ -638,7 +646,8 @@ pub mod pallet { /// A mapping between an original code hash and instrumented wasm code, ready for execution. #[pallet::storage] - pub(crate) type CodeStorage = StorageMap<_, Identity, CodeHash, PrefabWasmModule>; + pub(crate) type CodeStorage = + StorageMap<_, Identity, CodeHash, PrefabWasmModule>; /// The subtrie counter. #[pallet::storage] @@ -648,7 +657,8 @@ pub mod pallet { /// /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. #[pallet::storage] - pub(crate) type ContractInfoOf = StorageMap<_, Twox64Concat, T::AccountId, ContractInfo>; + pub(crate) type ContractInfoOf = + StorageMap<_, Twox64Concat, T::AccountId, ContractInfo>; /// Evicted contracts that await child trie deletion. /// @@ -684,13 +694,15 @@ where ) -> ContractExecResult { let mut gas_meter = GasMeter::new(gas_limit); let schedule = T::Schedule::get(); - let mut debug_message = if debug { - Some(Vec::new()) - } else { - None - }; + let mut debug_message = if debug { Some(Vec::new()) } else { None }; let result = ExecStack::>::run_call( - origin, dest, &mut gas_meter, &schedule, value, input_data, debug_message.as_mut(), + origin, + dest, + &mut gas_meter, + &schedule, + value, + input_data, + debug_message.as_mut(), ); ContractExecResult { result: result.map_err(|r| r.error), @@ -734,34 +746,36 @@ where }; let executable = match executable { Ok(executable) => executable, - Err(error) => return ContractInstantiateResult { - result: Err(error.into()), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), - debug_message: Vec::new(), - } - }; - let mut debug_message = if debug { - Some(Vec::new()) - } else { - None + Err(error) => + return ContractInstantiateResult { + result: Err(error.into()), + gas_consumed: gas_meter.gas_consumed(), + gas_required: gas_meter.gas_required(), + debug_message: Vec::new(), + }, }; + let mut debug_message = if debug { Some(Vec::new()) } else { None }; let result = ExecStack::>::run_instantiate( - origin, executable, &mut gas_meter, &schedule, - endowment, data, &salt, debug_message.as_mut(), - ).and_then(|(account_id, result)| { + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + debug_message.as_mut(), + ) + .and_then(|(account_id, result)| { let rent_projection = if compute_projection { - Some(Rent::>::compute_projection(&account_id) - .map_err(|_| >::NewContractNotFunded)?) 
+ Some( + Rent::>::compute_projection(&account_id) + .map_err(|_| >::NewContractNotFunded)?, + ) } else { None }; - Ok(InstantiateReturnValue { - result, - account_id, - rent_projection, - }) + Ok(InstantiateReturnValue { result, account_id, rent_projection }) }); ContractInstantiateResult { result: result.map_err(|e| e.error), @@ -800,9 +814,10 @@ where deploying_address: &T::AccountId, code_hash: &CodeHash, salt: &[u8], - ) -> T::AccountId - { - let buf: Vec<_> = deploying_address.as_ref().iter() + ) -> T::AccountId { + let buf: Vec<_> = deploying_address + .as_ref() + .iter() .chain(code_hash.as_ref()) .chain(salt) .cloned() @@ -847,7 +862,7 @@ where #[cfg(feature = "runtime-benchmarks")] fn reinstrument_module( module: &mut PrefabWasmModule, - schedule: &Schedule + schedule: &Schedule, ) -> frame_support::dispatch::DispatchResult { self::wasm::reinstrument(module, schedule) } diff --git a/substrate/frame/contracts/src/migration.rs b/substrate/frame/contracts/src/migration.rs index 8c5c06fde7ab133f7351f563e96bf3db1520a228..a28cb87bb60bde66c10f83bd3c53841ade73b6ef 100644 --- a/substrate/frame/contracts/src/migration.rs +++ b/substrate/frame/contracts/src/migration.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Config, Weight, Pallet}; +use crate::{Config, Pallet, Weight}; use frame_support::{ storage::migration, - traits::{GetPalletVersion, PalletVersion, PalletInfoAccess, Get}, + traits::{Get, GetPalletVersion, PalletInfoAccess, PalletVersion}, }; pub fn migrate() -> Weight { @@ -32,7 +32,7 @@ pub fn migrate() -> Weight { b"CurrentSchedule", b"", ); - } + }, _ => (), } diff --git a/substrate/frame/contracts/src/rent.rs b/substrate/frame/contracts/src/rent.rs index 3135862e88c903f3efe0ba13edb46ddfed62a09a..9446b027ec1f44b2f15169de6f5c00894d77ba25 100644 --- a/substrate/frame/contracts/src/rent.rs +++ b/substrate/frame/contracts/src/rent.rs @@ -18,23 +18,23 @@ //! A module responsible for computing the right amount of weight and charging it. use crate::{ - AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Pallet, Event, - TombstoneContractInfo, Config, CodeHash, Error, - storage::Storage, wasm::PrefabWasmModule, exec::Executable, gas::GasMeter, + exec::Executable, gas::GasMeter, storage::Storage, wasm::PrefabWasmModule, AliveContractInfo, + BalanceOf, CodeHash, Config, ContractInfo, ContractInfoOf, Error, Event, Pallet, + TombstoneContractInfo, }; -use sp_std::prelude::*; -use sp_io::hashing::blake2_256; -use sp_core::crypto::UncheckedFrom; use frame_support::{ storage::child, traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}, DefaultNoBound, }; use pallet_contracts_primitives::{ContractAccessError, RentProjection, RentProjectionResult}; +use sp_core::crypto::UncheckedFrom; +use sp_io::hashing::blake2_256; use sp_runtime::{ - DispatchError, traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, Saturating, Zero}, + DispatchError, }; +use sp_std::prelude::*; /// Information about the required deposit and resulting rent. 
/// @@ -83,13 +83,8 @@ where code_size: u32, ) -> Result>, DispatchError> { let current_block_number = >::block_number(); - let verdict = Self::consider_case( - account, - current_block_number, - Zero::zero(), - &contract, - code_size, - ); + let verdict = + Self::consider_case(account, current_block_number, Zero::zero(), &contract, code_size); Self::enact_verdict(account, contract, current_block_number, verdict, None) } @@ -136,10 +131,14 @@ where .unwrap_or_else(|| >::zero()) .saturating_add(contract.rent_paid); Self::enact_verdict( - account, contract, current_block_number, verdict, Some(module), + account, + contract, + current_block_number, + verdict, + Some(module), )?; Ok((Some(rent_paid), code_len)) - } + }, _ => Ok((None, code_len)), } } @@ -155,9 +154,7 @@ where /// NOTE that this is not a side-effect free function! It will actually collect rent and then /// compute the projection. This function is only used for implementation of an RPC method through /// `RuntimeApi` meaning that the changes will be discarded anyway. - pub fn compute_projection( - account: &T::AccountId, - ) -> RentProjectionResult { + pub fn compute_projection(account: &T::AccountId) -> RentProjectionResult { use ContractAccessError::IsTombstone; let contract_info = >::get(account); @@ -179,45 +176,42 @@ where // We skip the eviction in case one is in order. // Evictions should only be performed by [`try_eviction`]. - let new_contract_info = Self::enact_verdict( - account, alive_contract_info, current_block_number, verdict, None, - ); + let new_contract_info = + Self::enact_verdict(account, alive_contract_info, current_block_number, verdict, None); // Check what happened after enaction of the verdict. - let alive_contract_info = new_contract_info.map_err(|_| IsTombstone)?.ok_or_else(|| IsTombstone)?; + let alive_contract_info = + new_contract_info.map_err(|_| IsTombstone)?.ok_or_else(|| IsTombstone)?; // Compute how much would the fee per block be with the *updated* balance. let total_balance = T::Currency::total_balance(account); let free_balance = T::Currency::free_balance(account); - let fee_per_block = Self::fee_per_block( - &free_balance, &alive_contract_info, code_size, - ); + let fee_per_block = Self::fee_per_block(&free_balance, &alive_contract_info, code_size); if fee_per_block.is_zero() { - return Ok(RentProjection::NoEviction); + return Ok(RentProjection::NoEviction) } // Then compute how much the contract will sustain under these circumstances. - let rent_budget = Self::rent_budget(&total_balance, &free_balance, &alive_contract_info).expect( - "the contract exists and in the alive state; + let rent_budget = Self::rent_budget(&total_balance, &free_balance, &alive_contract_info) + .expect( + "the contract exists and in the alive state; the updated balance must be greater than subsistence deposit; this function doesn't return `None`; qed ", - ); + ); let blocks_left = match rent_budget.checked_div(&fee_per_block) { Some(blocks_left) => blocks_left, None => { // `fee_per_block` is not zero here, so `checked_div` can return `None` if // there is an overflow. This cannot happen with integers though. Return // `NoEviction` here just in case. - return Ok(RentProjection::NoEviction); - } + return Ok(RentProjection::NoEviction) + }, }; let blocks_left = blocks_left.saturated_into::().into(); - Ok(RentProjection::EvictionAt( - current_block_number + blocks_left, - )) + Ok(RentProjection::EvictionAt(current_block_number + blocks_left)) } /// Restores the destination account using the origin as prototype. 
@@ -246,18 +240,15 @@ where let current_block = >::block_number(); if origin_contract.last_write == Some(current_block) { - return Err(Error::::InvalidContractOrigin.into()); + return Err(Error::::InvalidContractOrigin.into()) } let dest_tombstone = >::get(&dest) .and_then(|c| c.get_tombstone()) .ok_or(Error::::InvalidDestinationContract)?; - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; + let last_write = + if !delta.is_empty() { Some(current_block) } else { origin_contract.last_write }; // Fails if the code hash does not exist on chain E::add_user(code_hash, gas_meter)?; @@ -266,7 +257,8 @@ where // fail later due to tombstones not matching. This is because the restoration // is always called from a contract and therefore in a storage transaction. // The failure of this function will lead to this transaction's rollback. - let bytes_taken: u32 = delta.iter() + let bytes_taken: u32 = delta + .iter() .filter_map(|key| { let key = blake2_256(key); child::get_raw(&child_trie_info, &key).map(|value| { @@ -284,21 +276,24 @@ where ); if tombstone != dest_tombstone { - return Err(Error::::InvalidTombstone.into()); + return Err(Error::::InvalidTombstone.into()) } origin_contract.storage_size -= bytes_taken; >::remove(&origin); E::remove_user(origin_contract.code_hash, gas_meter)?; - >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { - code_hash, - rent_allowance, - rent_paid: >::zero(), - deduct_block: current_block, - last_write, - .. origin_contract - })); + >::insert( + &dest, + ContractInfo::Alive(AliveContractInfo:: { + code_hash, + rent_allowance, + rent_paid: >::zero(), + deduct_block: current_block, + last_write, + ..origin_contract + }), + ); let origin_free_balance = T::Currency::free_balance(&origin); T::Currency::make_free_balance_be(&origin, >::zero()); @@ -314,42 +309,34 @@ where current_refcount: u32, at_refcount: u32, ) -> RentStatus { - let calc_share = |refcount: u32| { - aggregated_code_size.checked_div(refcount).unwrap_or(0) - }; + let calc_share = |refcount: u32| aggregated_code_size.checked_div(refcount).unwrap_or(0); let current_share = calc_share(current_refcount); let custom_share = calc_share(at_refcount); RentStatus { max_deposit: Self::required_deposit(contract, aggregated_code_size), current_deposit: Self::required_deposit(contract, current_share), - custom_refcount_deposit: - if at_refcount > 0 { - Some(Self::required_deposit(contract, custom_share)) - } else { - None - }, + custom_refcount_deposit: if at_refcount > 0 { + Some(Self::required_deposit(contract, custom_share)) + } else { + None + }, max_rent: Self::fee_per_block(free_balance, contract, aggregated_code_size), current_rent: Self::fee_per_block(free_balance, contract, current_share), - custom_refcount_rent: - if at_refcount > 0 { - Some(Self::fee_per_block(free_balance, contract, custom_share)) - } else { - None - }, + custom_refcount_rent: if at_refcount > 0 { + Some(Self::fee_per_block(free_balance, contract, custom_share)) + } else { + None + }, _reserved: None, } } /// Returns how much deposit is required to not pay rent. 
- fn required_deposit( - contract: &AliveContractInfo, - code_size_share: u32, - ) -> BalanceOf { + fn required_deposit(contract: &AliveContractInfo, code_size_share: u32) -> BalanceOf { T::DepositPerStorageByte::get() .saturating_mul(contract.storage_size.saturating_add(code_size_share).into()) .saturating_add( - T::DepositPerStorageItem::get() - .saturating_mul(contract.pair_count.into()) + T::DepositPerStorageItem::get().saturating_mul(contract.pair_count.into()), ) .saturating_add(T::DepositPerContract::get()) } @@ -363,8 +350,8 @@ where contract: &AliveContractInfo, code_size_share: u32, ) -> BalanceOf { - let missing_deposit = Self::required_deposit(contract, code_size_share) - .saturating_sub(*free_balance); + let missing_deposit = + Self::required_deposit(contract, code_size_share).saturating_sub(*free_balance); T::RentFraction::get().mul_ceil(missing_deposit) } @@ -383,16 +370,13 @@ where // Reserved balance contributes towards the subsistence threshold to stay consistent // with the existential deposit where the reserved balance is also counted. if *total_balance < subsistence_threshold { - return None; + return None } // However, reserved balance cannot be charged so we need to use the free balance // to calculate the actual budget (which can be 0). let rent_allowed_to_charge = free_balance.saturating_sub(subsistence_threshold); - Some(>::min( - contract.rent_allowance, - rent_allowed_to_charge, - )) + Some(>::min(contract.rent_allowance, rent_allowed_to_charge)) } /// Consider the case for rent payment of the given account and returns a `Verdict`. @@ -414,7 +398,7 @@ where }; if blocks_passed.is_zero() { // Rent has already been paid - return Verdict::Exempt; + return Verdict::Exempt } let total_balance = T::Currency::total_balance(account); @@ -425,7 +409,7 @@ where if fee_per_block.is_zero() { // The rent deposit offset reduced the fee to 0. This means that the contract // gets the rent for free. - return Verdict::Exempt; + return Verdict::Exempt } let rent_budget = match Self::rent_budget(&total_balance, &free_balance, contract) { @@ -443,7 +427,7 @@ where account, ); 0u32.into() - } + }, }; let dues = fee_per_block @@ -469,18 +453,15 @@ where if insufficient_rent || !can_withdraw_rent { // The contract cannot afford the rent payment and has a balance above the subsistence // threshold, so it leaves a tombstone. - let amount = if can_withdraw_rent { - Some(OutstandingAmount::new(dues_limited)) - } else { - None - }; - return Verdict::Evict { amount }; + let amount = + if can_withdraw_rent { Some(OutstandingAmount::new(dues_limited)) } else { None }; + return Verdict::Evict { amount } } return Verdict::Charge { // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. amount: OutstandingAmount::new(dues_limited), - }; + } } /// Enacts the given verdict and returns the updated `ContractInfo`. @@ -511,9 +492,7 @@ where } // Note: this operation is heavy. 
- let child_storage_root = child::root( - &alive_contract_info.child_trie_info(), - ); + let child_storage_root = child::root(&alive_contract_info.child_trie_info()); let tombstone = >::new( &child_storage_root[..], @@ -524,11 +503,9 @@ where code.drop_from_storage(); >::deposit_event(Event::Evicted(account.clone())); Ok(None) - } - (Verdict::Evict { amount: _ }, None) => { - Ok(None) - } - (Verdict::Exempt, _) => { + }, + (Verdict::Evict { amount: _ }, None) => Ok(None), + (Verdict::Exempt, _) => { let contract = ContractInfo::Alive(AliveContractInfo:: { deduct_block: current_block_number, ..alive_contract_info @@ -546,11 +523,9 @@ where >::insert(account, &contract); amount.withdraw(account); Ok(Some(contract.get_alive().expect("We just constructed it as alive. qed"))) - } + }, } } - - } /// The amount to charge. @@ -596,9 +571,7 @@ enum Verdict { Exempt, /// The contract cannot afford payment within its rent budget so it gets evicted. However, /// because its balance is greater than the subsistence threshold it leaves a tombstone. - Evict { - amount: Option>, - }, + Evict { amount: Option> }, /// Everything is OK, we just only take some charge. Charge { amount: OutstandingAmount }, } diff --git a/substrate/frame/contracts/src/schedule.rs b/substrate/frame/contracts/src/schedule.rs index 0abe0c54d7481cdb7a494c08de66e069b365f248..a1118633bfde91565d4bbc5a6d9bd9a76c47a144 100644 --- a/substrate/frame/contracts/src/schedule.rs +++ b/substrate/frame/contracts/src/schedule.rs @@ -18,16 +18,16 @@ //! This module contains the cost schedule and supporting code that constructs a //! sane default schedule from a `WeightInfo` implementation. -use crate::{Config, weights::WeightInfo}; +use crate::{weights::WeightInfo, Config}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use codec::{Decode, Encode}; +use frame_support::{weights::Weight, DefaultNoBound}; use pallet_contracts_proc_macro::{ScheduleDebug, WeightDebug}; -use frame_support::{DefaultNoBound, weights::Weight}; -use sp_std::{marker::PhantomData, vec::Vec}; -use codec::{Encode, Decode}; use pwasm_utils::{parity_wasm::elements, rules}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use sp_runtime::RuntimeDebug; +use sp_std::{marker::PhantomData, vec::Vec}; /// How many API calls are executed in a single batch. The reason for increasing the amount /// of API calls in batches (per benchmark component increase) is so that the linear regression @@ -50,18 +50,18 @@ pub const INSTR_BENCHMARK_BATCH_SIZE: u32 = 1_000; /// fn create_schedule() -> Schedule { /// Schedule { /// limits: Limits { -/// globals: 3, -/// parameters: 3, -/// memory_pages: 16, -/// table_size: 3, -/// br_table_size: 3, -/// .. Default::default() -/// }, +/// globals: 3, +/// parameters: 3, +/// memory_pages: 16, +/// table_size: 3, +/// br_table_size: 3, +/// .. Default::default() +/// }, /// instruction_weights: InstructionWeights { -/// version: 5, +/// version: 5, /// .. Default::default() /// }, -/// .. Default::default() +/// .. Default::default() /// } /// } /// ``` @@ -392,11 +392,13 @@ pub struct HostFnWeights { /// The type parameter is used in the default implementation. #[codec(skip)] - pub _phantom: PhantomData + pub _phantom: PhantomData, } macro_rules! replace_token { - ($_in:tt $replacement:tt) => { $replacement }; + ($_in:tt $replacement:tt) => { + $replacement + }; } macro_rules! call_zero { @@ -420,20 +422,22 @@ macro_rules! cost_batched_args { macro_rules! 
cost_instr_no_params_with_batch_size { ($name:ident, $batch_size:expr) => { (cost_args!($name, 1) / Weight::from($batch_size)) as u32 - } + }; } macro_rules! cost_instr_with_batch_size { ($name:ident, $num_params:expr, $batch_size:expr) => { - cost_instr_no_params_with_batch_size!($name, $batch_size) - .saturating_sub((cost_instr_no_params_with_batch_size!(instr_i64const, $batch_size) / 2).saturating_mul($num_params)) - } + cost_instr_no_params_with_batch_size!($name, $batch_size).saturating_sub( + (cost_instr_no_params_with_batch_size!(instr_i64const, $batch_size) / 2) + .saturating_mul($num_params), + ) + }; } macro_rules! cost_instr { ($name:ident, $num_params:expr) => { cost_instr_with_batch_size!($name, $num_params, INSTR_BENCHMARK_BATCH_SIZE) - } + }; } macro_rules! cost_byte_args { @@ -451,25 +455,25 @@ macro_rules! cost_byte_batched_args { macro_rules! cost { ($name:ident) => { cost_args!($name, 1) - } + }; } macro_rules! cost_batched { ($name:ident) => { cost_batched_args!($name, 1) - } + }; } macro_rules! cost_byte { ($name:ident) => { cost_byte_args!($name, 1) - } + }; } macro_rules! cost_byte_batched { ($name:ident) => { cost_byte_batched_args!($name, 1) - } + }; } impl Default for Limits { @@ -578,7 +582,11 @@ impl Default for HostFnWeights { random: cost_batched!(seal_random), deposit_event: cost_batched!(seal_deposit_event), deposit_event_per_topic: cost_batched_args!(seal_deposit_event_per_topic_and_kb, 1, 0), - deposit_event_per_byte: cost_byte_batched_args!(seal_deposit_event_per_topic_and_kb, 0, 1), + deposit_event_per_byte: cost_byte_batched_args!( + seal_deposit_event_per_topic_and_kb, + 0, + 1 + ), debug_message: cost_batched!(seal_debug_message), set_rent_allowance: cost_batched!(seal_set_rent_allowance), set_storage: cost_batched!(seal_set_storage), @@ -588,13 +596,43 @@ impl Default for HostFnWeights { get_storage_per_byte: cost_byte_batched!(seal_get_storage_per_kb), transfer: cost_batched!(seal_transfer), call: cost_batched!(seal_call), - call_transfer_surcharge: cost_batched_args!(seal_call_per_transfer_input_output_kb, 1, 0, 0), - call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), - call_per_output_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), + call_transfer_surcharge: cost_batched_args!( + seal_call_per_transfer_input_output_kb, + 1, + 0, + 0 + ), + call_per_input_byte: cost_byte_batched_args!( + seal_call_per_transfer_input_output_kb, + 0, + 1, + 0 + ), + call_per_output_byte: cost_byte_batched_args!( + seal_call_per_transfer_input_output_kb, + 0, + 0, + 1 + ), instantiate: cost_batched!(seal_instantiate), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 1, 0, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 1, 0), - instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 0, 1), + instantiate_per_input_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 1, + 0, + 0 + ), + instantiate_per_output_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 0, + 1, + 0 + ), + instantiate_per_salt_byte: cost_byte_batched_args!( + seal_instantiate_per_input_output_salt_kb, + 0, + 0, + 1 + ), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), @@ -625,7 +663,7 @@ impl Schedule { let 
elements::Type::Function(func) = func; func.params().len() as u32 }) - .collect() + .collect(), } } } @@ -639,12 +677,25 @@ impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> { let weight = match *instruction { End | Unreachable | Return | Else => 0, I32Const(_) | I64Const(_) | Block(_) | Loop(_) | Nop | Drop => w.i64const, - I32Load(_, _) | I32Load8S(_, _) | I32Load8U(_, _) | I32Load16S(_, _) | - I32Load16U(_, _) | I64Load(_, _) | I64Load8S(_, _) | I64Load8U(_, _) | - I64Load16S(_, _) | I64Load16U(_, _) | I64Load32S(_, _) | I64Load32U(_, _) - => w.i64load, - I32Store(_, _) | I32Store8(_, _) | I32Store16(_, _) | I64Store(_, _) | - I64Store8(_, _) | I64Store16(_, _) | I64Store32(_, _) => w.i64store, + I32Load(_, _) | + I32Load8S(_, _) | + I32Load8U(_, _) | + I32Load16S(_, _) | + I32Load16U(_, _) | + I64Load(_, _) | + I64Load8S(_, _) | + I64Load8U(_, _) | + I64Load16S(_, _) | + I64Load16U(_, _) | + I64Load32S(_, _) | + I64Load32U(_, _) => w.i64load, + I32Store(_, _) | + I32Store8(_, _) | + I32Store16(_, _) | + I64Store(_, _) | + I64Store8(_, _) | + I64Store16(_, _) | + I64Store32(_, _) => w.i64store, Select => w.select, If(_) => w.r#if, Br(_) => w.br, @@ -658,10 +709,9 @@ impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> { CurrentMemory(_) => w.memory_current, GrowMemory(_) => w.memory_grow, CallIndirect(idx, _) => *self.params.get(idx as usize).unwrap_or(&max_params), - BrTable(ref data) => - w.br_table.saturating_add( - w.br_table_per_entry.saturating_mul(data.table.len() as u32) - ), + BrTable(ref data) => w + .br_table + .saturating_add(w.br_table_per_entry.saturating_mul(data.table.len() as u32)), I32Clz | I64Clz => w.i64clz, I32Ctz | I64Ctz => w.i64ctz, I32Popcnt | I64Popcnt => w.i64popcnt, @@ -711,8 +761,8 @@ impl<'a, T: Config> rules::Rules for ScheduleRules<'a, T> { #[cfg(test)] mod test { - use crate::tests::Test; use super::*; + use crate::tests::Test; #[test] fn print_test_schedule() { diff --git a/substrate/frame/contracts/src/storage.rs b/substrate/frame/contracts/src/storage.rs index 15782d7d1e45957172d9a64f1d6b1f884d9f1d7a..847b57c89d6b8fe1b6c39913e0f4067e99f7407f 100644 --- a/substrate/frame/contracts/src/storage.rs +++ b/substrate/frame/contracts/src/storage.rs @@ -19,29 +19,30 @@ use crate::{ exec::{AccountIdOf, StorageKey}, - BalanceOf, CodeHash, ContractInfoOf, Config, TrieId, DeletionQueue, Error, weights::WeightInfo, + BalanceOf, CodeHash, Config, ContractInfoOf, DeletionQueue, Error, TrieId, }; -use codec::{Codec, Encode, Decode}; -use sp_std::prelude::*; -use sp_std::{marker::PhantomData, fmt::Debug}; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - RuntimeDebug, - traits::{Bounded, Saturating, Zero, Hash, Member, MaybeSerializeDeserialize}, -}; -use sp_core::crypto::UncheckedFrom; +use codec::{Codec, Decode, Encode}; use frame_support::{ dispatch::{DispatchError, DispatchResult}, - storage::child::{self, KillStorageResult, ChildInfo}, + storage::child::{self, ChildInfo, KillStorageResult}, traits::Get, weights::Weight, }; +use sp_core::crypto::UncheckedFrom; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + traits::{Bounded, Hash, MaybeSerializeDeserialize, Member, Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub type AliveContractInfo = RawAliveContractInfo, BalanceOf, ::BlockNumber>; -pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; +pub type TombstoneContractInfo = RawTombstoneContractInfo< + ::Hash, + ::Hashing, +>; /// Information for managing an account 
and its sub trie abstraction. /// This is the required info to cache for an account @@ -126,10 +127,16 @@ pub struct RawTombstoneContractInfo(H, PhantomData); impl RawTombstoneContractInfo where - H: Member + MaybeSerializeDeserialize+ Debug - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default - + sp_std::hash::Hash + Codec, - Hasher: Hash, + H: Member + + MaybeSerializeDeserialize + + Debug + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + sp_std::hash::Hash + + Codec, + Hasher: Hash, { pub fn new(storage_root: &[u8], code_hash: H) -> Self { let mut buf = Vec::new(); @@ -156,7 +163,7 @@ pub struct Storage(PhantomData); impl Storage where T: Config, - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { /// Reads a storage kv pair of a contract. /// @@ -187,11 +194,15 @@ where // Update the total number of KV pairs and the number of empty pairs. match (&opt_prev_len, &opt_new_value) { (Some(_), None) => { - new_info.pair_count = new_info.pair_count.checked_sub(1) + new_info.pair_count = new_info + .pair_count + .checked_sub(1) .ok_or_else(|| Error::::StorageExhausted)?; }, (None, Some(_)) => { - new_info.pair_count = new_info.pair_count.checked_add(1) + new_info.pair_count = new_info + .pair_count + .checked_add(1) .ok_or_else(|| Error::::StorageExhausted)?; }, (Some(_), Some(_)) => {}, @@ -200,10 +211,8 @@ where // Update the total storage size. let prev_value_len = opt_prev_len.unwrap_or(0); - let new_value_len = opt_new_value - .as_ref() - .map(|new_value| new_value.len() as u32) - .unwrap_or(0); + let new_value_len = + opt_new_value.as_ref().map(|new_value| new_value.len() as u32).unwrap_or(0); new_info.storage_size = new_info .storage_size .checked_sub(prev_value_len) @@ -230,7 +239,7 @@ where ch: CodeHash, ) -> Result, DispatchError> { if >::contains_key(account) { - return Err(Error::::DuplicateContract.into()); + return Err(Error::::DuplicateContract.into()) } let contract = AliveContractInfo:: { @@ -297,19 +306,17 @@ where pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { - return weight_limit; + return weight_limit } - let (weight_per_key, mut remaining_key_budget) = Self::deletion_budget( - queue_len, - weight_limit, - ); + let (weight_per_key, mut remaining_key_budget) = + Self::deletion_budget(queue_len, weight_limit); // We want to check whether we have enough weight to decode the queue before // proceeding. Too little weight for decoding might happen during runtime upgrades // which consume the whole block before the other `on_initialize` blocks are called. 
if remaining_key_budget == 0 { - return weight_limit; + return weight_limit } let mut queue = >::get(); @@ -318,10 +325,8 @@ where // Cannot panic due to loop condition let trie = &mut queue[0]; let pair_count = trie.pair_count; - let outcome = child::kill_storage( - &child_trie_info(&trie.trie_id), - Some(remaining_key_budget), - ); + let outcome = + child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget)); if pair_count > remaining_key_budget { // Cannot underflow because of the if condition trie.pair_count -= remaining_key_budget; @@ -341,8 +346,8 @@ where KillStorageResult::AllRemoved(_) => (), } } - remaining_key_budget = remaining_key_budget - .saturating_sub(remaining_key_budget.min(pair_count)); + remaining_key_budget = + remaining_key_budget.saturating_sub(remaining_key_budget.min(pair_count)); } >::put(queue); @@ -352,29 +357,22 @@ where /// This generator uses inner counter for account id and applies the hash over `AccountId + /// accountid_counter`. pub fn generate_trie_id(account_id: &AccountIdOf, seed: u64) -> TrieId { - let buf: Vec<_> = account_id.as_ref().iter() - .chain(&seed.to_le_bytes()) - .cloned() - .collect(); + let buf: Vec<_> = account_id.as_ref().iter().chain(&seed.to_le_bytes()).cloned().collect(); T::Hashing::hash(&buf).as_ref().into() } /// Returns the code hash of the contract specified by `account` ID. #[cfg(test)] - pub fn code_hash(account: &AccountIdOf) -> Option> - { - >::get(account) - .and_then(|i| i.as_alive().map(|i| i.code_hash)) + pub fn code_hash(account: &AccountIdOf) -> Option> { + >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) } /// Fill up the queue in order to exercise the limits during testing. #[cfg(test)] pub fn fill_queue_with_dummies() { - let queue: Vec<_> = (0..T::DeletionQueueDepth::get()).map(|_| DeletedContract { - pair_count: 0, - trie_id: vec![], - }) - .collect(); + let queue: Vec<_> = (0..T::DeletionQueueDepth::get()) + .map(|_| DeletedContract { pair_count: 0, trie_id: vec![] }) + .collect(); >::put(queue); } } diff --git a/substrate/frame/contracts/src/tests.rs b/substrate/frame/contracts/src/tests.rs index ea5fbccb0f2a1dc11a0e150a32a04c7c47bc923c..f8528c3dbe7cc6c2a2b2544d3218794f10b9a479 100644 --- a/substrate/frame/contracts/src/tests.rs +++ b/substrate/frame/contracts/src/tests.rs @@ -16,37 +16,35 @@ // limitations under the License. 
use crate::{ - BalanceOf, ContractInfo, ContractInfoOf, Pallet, - Config, Schedule, - Error, storage::Storage, chain_extension::{ - Result as ExtensionResult, Environment, ChainExtension, Ext, SysConfig, RetVal, - UncheckedFrom, InitState, ReturnFlags, + ChainExtension, Environment, Ext, InitState, Result as ExtensionResult, RetVal, + ReturnFlags, SysConfig, UncheckedFrom, }, - exec::{AccountIdOf, Executable, Frame}, wasm::PrefabWasmModule, + exec::{AccountIdOf, Executable, Frame}, + storage::{RawAliveContractInfo, Storage}, + wasm::{PrefabWasmModule, ReturnCode as RuntimeReturnCode}, weights::WeightInfo, - wasm::ReturnCode as RuntimeReturnCode, - storage::RawAliveContractInfo, + BalanceOf, Config, ContractInfo, ContractInfoOf, Error, Pallet, Schedule, }; use assert_matches::assert_matches; use codec::Encode; -use sp_core::Bytes; -use sp_runtime::{ - traits::{BlakeTwo256, Hash, IdentityLookup, Convert}, - testing::{Header, H256}, - AccountId32, Perbill, -}; -use sp_io::hashing::blake2_256; use frame_support::{ - assert_ok, assert_err, assert_err_ignore_postinfo, - parameter_types, assert_storage_noop, - traits::{Currency, ReservableCurrency, OnInitialize, Filter}, - weights::{Weight, PostDispatchInfo, DispatchClass, constants::WEIGHT_PER_SECOND}, + assert_err, assert_err_ignore_postinfo, assert_ok, assert_storage_noop, dispatch::DispatchErrorWithPostInfo, + parameter_types, storage::child, + traits::{Currency, Filter, OnInitialize, ReservableCurrency}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, PostDispatchInfo, Weight}, }; use frame_system::{self as system, EventRecord, Phase}; use pretty_assertions::assert_eq; +use sp_core::Bytes; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, Convert, Hash, IdentityLookup}, + AccountId32, Perbill, +}; use std::cell::RefCell; use crate as pallet_contracts; @@ -71,23 +69,21 @@ frame_support::construct_runtime!( #[macro_use] pub mod test_utils { - use super::{Test, Balances, System}; + use super::{Balances, System, Test}; use crate::{ - ContractInfoOf, CodeHash, - storage::{Storage, ContractInfo}, - exec::{StorageKey, AccountIdOf}, - Pallet as Contracts, - TrieId, AccountCounter, + exec::{AccountIdOf, StorageKey}, + storage::{ContractInfo, Storage}, + AccountCounter, CodeHash, ContractInfoOf, Pallet as Contracts, TrieId, }; use frame_support::traits::Currency; pub fn set_storage(addr: &AccountIdOf, key: &StorageKey, value: Option>) { - let mut contract_info = >::get(&addr).unwrap().get_alive().unwrap(); + let mut contract_info = >::get(&addr).unwrap().get_alive().unwrap(); let block_number = System::block_number(); Storage::::write(block_number, &mut contract_info, key, value).unwrap(); } pub fn get_storage(addr: &AccountIdOf, key: &StorageKey) -> Option> { - let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); + let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); Storage::::read(&contract_info.trie_id, key) } pub fn generate_trie_id(address: &AccountIdOf) -> TrieId { @@ -114,15 +110,13 @@ pub mod test_utils { ( $x:expr , $y:expr $(,)? ) => {{ use sp_std::convert::TryInto; assert_eq!(u32::from_le_bytes($x.data[..].try_into().unwrap()), $y as u32); - }} + }}; } macro_rules! assert_refcount { ( $code_hash:expr , $should:expr $(,)? 
) => {{ - let is = crate::CodeStorage::::get($code_hash) - .map(|m| m.refcount()) - .unwrap_or(0); + let is = crate::CodeStorage::::get($code_hash).map(|m| m.refcount()).unwrap_or(0); assert_eq!(is, $should); - }} + }}; } } @@ -152,11 +146,7 @@ impl TestExtension { impl Default for TestExtension { fn default() -> Self { - Self { - enabled: true, - last_seen_buffer: vec![], - last_seen_inputs: (0, 0, 0, 0), - } + Self { enabled: true, last_seen_buffer: vec![], last_seen_inputs: (0, 0, 0, 0) } } } @@ -176,11 +166,10 @@ impl ChainExtension for TestExtension { }, 1 => { let env = env.only_in(); - TEST_EXTENSION.with(|e| - e.borrow_mut().last_seen_inputs = ( - env.val0(), env.val1(), env.val2(), env.val3() - ) - ); + TEST_EXTENSION.with(|e| { + e.borrow_mut().last_seen_inputs = + (env.val0(), env.val1(), env.val2(), env.val3()) + }); Ok(RetVal::Converging(func_id)) }, 2 => { @@ -189,15 +178,10 @@ impl ChainExtension for TestExtension { env.charge_weight(weight)?; Ok(RetVal::Converging(func_id)) }, - 3 => { - Ok(RetVal::Diverging{ - flags: ReturnFlags::REVERT, - data: vec![42, 99], - }) - }, + 3 => Ok(RetVal::Diverging { flags: ReturnFlags::REVERT, data: vec![42, 99] }), _ => { panic!("Passed unknown func_id to test chain extension: {}", func_id); - } + }, } } @@ -340,9 +324,7 @@ pub struct ExtBuilder { } impl Default for ExtBuilder { fn default() -> Self { - Self { - existential_deposit: 1, - } + Self { existential_deposit: 1 } } } impl ExtBuilder { @@ -356,9 +338,9 @@ impl ExtBuilder { pub fn build(self) -> sp_io::TestExternalities { self.set_associated_consts(); let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![] } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -369,9 +351,7 @@ impl ExtBuilder { /// with it's hash. /// /// The fixture files are located under the `fixtures/` directory. -fn compile_module( - fixture_name: &str, -) -> wat::Result<(Vec, ::Output)> +fn compile_module(fixture_name: &str) -> wat::Result<(Vec, ::Output)> where T: frame_system::Config, { @@ -392,22 +372,20 @@ fn calling_plain_account_fails() { assert_eq!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()), - Err( - DispatchErrorWithPostInfo { - error: Error::::ContractNotFound.into(), - post_info: PostDispatchInfo { - actual_weight: Some(base_cost), - pays_fee: Default::default(), - }, - } - ) + Err(DispatchErrorWithPostInfo { + error: Error::::ContractNotFound.into(), + post_info: PostDispatchInfo { + actual_weight: Some(base_cost), + pays_fee: Default::default(), + }, + }) ); }); } #[test] fn account_removal_does_not_remove_storage() { - use self::test_utils::{set_storage, get_storage}; + use self::test_utils::{get_storage, set_storage}; ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = test_utils::generate_trie_id(&ALICE); @@ -461,23 +439,11 @@ fn account_removal_does_not_remove_storage() { // Verify that no entries are removed. 
{
-		assert_eq!(
-			get_storage(&ALICE, key1),
-			Some(b"1".to_vec())
-		);
-		assert_eq!(
-			get_storage(&ALICE, key2),
-			Some(b"2".to_vec())
-		);
+		assert_eq!(get_storage(&ALICE, key1), Some(b"1".to_vec()));
+		assert_eq!(get_storage(&ALICE, key2), Some(b"2".to_vec()));

-		assert_eq!(
-			get_storage(&BOB, key1),
-			Some(b"3".to_vec())
-		);
-		assert_eq!(
-			get_storage(&BOB, key2),
-			Some(b"4".to_vec())
-		);
+		assert_eq!(get_storage(&BOB, key1), Some(b"3".to_vec()));
+		assert_eq!(get_storage(&BOB, key2), Some(b"4".to_vec()));
	}
	});
}

@@ -486,25 +452,24 @@ fn account_removal_does_not_remove_storage() {
fn instantiate_and_call_and_deposit_event() {
	let (wasm, code_hash) = compile_module::<Test>("return_from_start_fn").unwrap();

-	ExtBuilder::default()
-		.existential_deposit(100)
-		.build()
-		.execute_with(|| {
-			let _ = Balances::deposit_creating(&ALICE, 1_000_000);
-			let subsistence = Pallet::<Test>::subsistence_threshold();
+	ExtBuilder::default().existential_deposit(100).build().execute_with(|| {
+		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+		let subsistence = Pallet::<Test>::subsistence_threshold();

-			// Check at the end to get hash on error easily
-			let creation = Contracts::instantiate_with_code(
-				Origin::signed(ALICE),
-				subsistence * 100,
-				GAS_LIMIT,
-				wasm,
-				vec![],
-				vec![],
-			);
-			let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);
+		// Check at the end to get hash on error easily
+		let creation = Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			subsistence * 100,
+			GAS_LIMIT,
+			wasm,
+			vec![],
+			vec![],
+		);
+		let addr = Contracts::contract_address(&ALICE, &code_hash, &[]);

-		assert_eq!(System::events(), vec![
+		assert_eq!(
+			System::events(),
+			vec![
ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Call contract with allowed storage value. - assert_ok!(Contracts::call( + // Call contract with allowed storage value. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT * 2, // we are copying a huge buffer, + ::Schedule::get().limits.payload_len.encode(), + )); + + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - addr.clone(), + addr, 0, - GAS_LIMIT * 2, // we are copying a huge buffer, - ::Schedule::get().limits.payload_len.encode(), - )); - - // Call contract with too large a storage value. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - (::Schedule::get().limits.payload_len + 1).encode(), - ), - Error::::ValueTooLarge, - ); - }); + GAS_LIMIT, + (::Schedule::get().limits.payload_len + 1).encode(), + ), + Error::::ValueTooLarge, + ); + }); } #[test] @@ -608,47 +573,50 @@ fn run_out_of_gas() { let (wasm, code_hash) = compile_module::("run_out_of_gas").unwrap(); let subsistence = Pallet::::subsistence_threshold(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100 * subsistence, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Call the contract with a fixed gas limit. It must run out of gas because it just + // loops forever. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - 100 * subsistence, - GAS_LIMIT, - wasm, - vec![], + addr, // newly created account + 0, + 67_500_000, vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - - // Call the contract with a fixed gas limit. It must run out of gas because it just - // loops forever. 
- assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr, // newly created account - 0, - 67_500_000, - vec![], - ), - Error::::OutOfGas, - ); - }); + ), + Error::::OutOfGas, + ); + }); } /// Input data for each call in set_rent code mod call { use super::{AccountIdOf, Test}; - pub fn set_storage_4_byte() -> Vec { 0u32.to_le_bytes().to_vec() } - pub fn remove_storage_4_byte() -> Vec { 1u32.to_le_bytes().to_vec() } + pub fn set_storage_4_byte() -> Vec { + 0u32.to_le_bytes().to_vec() + } + pub fn remove_storage_4_byte() -> Vec { + 1u32.to_le_bytes().to_vec() + } #[allow(dead_code)] pub fn transfer(to: &AccountIdOf) -> Vec { 2u32.to_le_bytes().iter().chain(AsRef::<[u8]>::as_ref(to)).cloned().collect() } - pub fn null() -> Vec { 3u32.to_le_bytes().to_vec() } + pub fn null() -> Vec { + 3u32.to_le_bytes().to_vec() + } } #[test] @@ -656,117 +624,71 @@ fn storage_size() { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); // Storage size - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - // rent_allowance - ::Balance::from(10_000u32).encode(), - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let bob_contract = ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 - ); - assert_eq!( - bob_contract.pair_count, - 1, - ); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + // rent_allowance + ::Balance::from(10_000u32).encode(), + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(bob_contract.storage_size, 4); + assert_eq!(bob_contract.pair_count, 1,); - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - call::set_storage_4_byte() - )); - let bob_contract = ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 + 4 - ); - assert_eq!( - bob_contract.pair_count, - 2, - ); + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::set_storage_4_byte() + )); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(bob_contract.storage_size, 4 + 4); + assert_eq!(bob_contract.pair_count, 2,); - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - call::remove_storage_4_byte() - )); - let bob_contract = ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap(); - assert_eq!( - bob_contract.storage_size, - 4 - ); - assert_eq!( - bob_contract.pair_count, - 1, - ); - }); + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::remove_storage_4_byte() + )); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(bob_contract.storage_size, 4); + assert_eq!(bob_contract.pair_count, 1,); + }); } #[test] fn empty_kv_pairs() { let (wasm, code_hash) = compile_module::("set_empty_storage").unwrap(); - ExtBuilder::default() - .build() - .execute_with(|| { - let _ = 
Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let bob_contract = ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap(); + ExtBuilder::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let bob_contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - assert_eq!( - bob_contract.storage_size, - 0, - ); - assert_eq!( - bob_contract.pair_count, - 1, - ); - }); + assert_eq!(bob_contract.storage_size, 0,); + assert_eq!(bob_contract.pair_count, 1,); + }); } fn initialize_block(number: u64) { - System::initialize( - &number, - &[0u8; 32].into(), - &Default::default(), - Default::default(), - ); + System::initialize(&number, &[0u8; 32].into(), &Default::default(), Default::default()); } #[test] @@ -775,83 +697,92 @@ fn deduct_blocks() { let endowment: BalanceOf = 100_000; let allowance: BalanceOf = 70_000; - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - endowment, - GAS_LIMIT, - wasm, - allowance.encode(), - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - let code_len: BalanceOf = - PrefabWasmModule::::from_storage_noinstr(contract.code_hash) - .unwrap() - .occupied_storage() - .into(); - - // The instantiation deducted the rent for one block immediately - let rent0 = ::RentFraction::get() + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + endowment, + GAS_LIMIT, + wasm, + allowance.encode(), + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + let code_len: BalanceOf = + PrefabWasmModule::::from_storage_noinstr(contract.code_hash) + .unwrap() + .occupied_storage() + .into(); + + // The instantiation deducted the rent for one block immediately + let rent0 = ::RentFraction::get() // (base_deposit(8) + bytes in storage(4) + size of code) * byte_price // + 1 storage item (10_000) - free_balance .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - endowment) // blocks to rent * 1; - assert!(rent0 > 0); - assert_eq!(contract.rent_allowance, allowance - rent0); - assert_eq!(contract.deduct_block, 1); - assert_eq!(Balances::free_balance(&addr), endowment - rent0); + assert!(rent0 > 0); + assert_eq!(contract.rent_allowance, allowance - rent0); + assert_eq!(contract.deduct_block, 1); + assert_eq!(Balances::free_balance(&addr), endowment - rent0); - // Advance 4 blocks - initialize_block(5); + // Advance 4 blocks + initialize_block(5); - // Trigger rent through call - assert_ok!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) - ); + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::null() 
+ )); - // Check result - let rent = ::RentFraction::get() - .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0)) - * 4; - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - assert_eq!(contract.rent_allowance, allowance - rent0 - rent); - assert_eq!(contract.deduct_block, 5); - assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent); - - // Advance 2 blocks more - initialize_block(7); - - // Trigger rent through call - assert_ok!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) - ); + // Check result + let rent = ::RentFraction::get() + .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0)) * + 4; + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent); + assert_eq!(contract.deduct_block, 5); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent); - // Check result - let rent_2 = ::RentFraction::get() - .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0 - rent)) - * 2; - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); - assert_eq!(contract.deduct_block, 7); - assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2); - - // Second call on same block should have no effect on rent - assert_ok!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) - ); - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); - assert_eq!(contract.deduct_block, 7); - assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2) - }); + // Advance 2 blocks more + initialize_block(7); + + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::null() + )); + + // Check result + let rent_2 = ::RentFraction::get() + .mul_ceil((8 + 4 + code_len) * 10_000 + 10_000 - (endowment - rent0 - rent)) * + 2; + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); + assert_eq!(contract.deduct_block, 7); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2); + + // Second call on same block should have no effect on rent + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::null() + )); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + assert_eq!(contract.rent_allowance, allowance - rent0 - rent - rent_2); + assert_eq!(contract.deduct_block, 7); + assert_eq!(Balances::free_balance(&addr), endowment - rent0 - rent - rent_2) + }); } #[test] @@ -867,16 +798,48 @@ fn signed_claim_surcharge_contract_removals() { #[test] fn claim_surcharge_malus() { // Test surcharge malus for inherent - claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); - claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); - claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); - claim_surcharge(5, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), false); + claim_surcharge( + 8, + |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), 
+ true, + ); + claim_surcharge( + 7, + |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), + true, + ); + claim_surcharge( + 6, + |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), + true, + ); + claim_surcharge( + 5, + |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), + false, + ); // Test surcharge malus for signed - claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), true); - claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); - claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); - claim_surcharge(5, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge( + 8, + |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), + true, + ); + claim_surcharge( + 7, + |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), + false, + ); + claim_surcharge( + 6, + |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), + false, + ); + claim_surcharge( + 5, + |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), + false, + ); } /// Claim surcharge with the given trigger_call at the given blocks. @@ -884,203 +847,174 @@ fn claim_surcharge_malus() { fn claim_surcharge(blocks: u64, trigger_call: impl Fn(AccountIdOf) -> bool, removes: bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm, - ::Balance::from(30_000u32).encode(), // rent allowance - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + ::Balance::from(30_000u32).encode(), // rent allowance + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Advance blocks - initialize_block(blocks); + // Advance blocks + initialize_block(blocks); - // Trigger rent through call - assert_eq!(trigger_call(addr.clone()), removes); + // Trigger rent through call + assert_eq!(trigger_call(addr.clone()), removes); - if removes { - assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); - } else { - assert!(ContractInfoOf::::get(&addr).unwrap().get_alive().is_some()); - } - }); + if removes { + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + } else { + assert!(ContractInfoOf::::get(&addr).unwrap().get_alive().is_some()); + } + }); } /// Test for all kind of removals for the given trigger: /// * if balance is reached and balance > subsistence threshold /// * if allowance is exceeded /// * if balance is reached and balance < subsistence threshold -/// * this case cannot be triggered by a contract: we check whether a tombstone is left +/// * this case cannot be triggered by a contract: we check whether a tombstone is left fn removals(trigger_call: impl Fn(AccountIdOf) -> bool) { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); // Balance reached and superior to subsistence 
threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 70_000, - GAS_LIMIT, - wasm.clone(), - ::Balance::from(100_000u32).encode(), // rent allowance - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let allowance = ContractInfoOf::::get(&addr) - .unwrap().get_alive().unwrap().rent_allowance; - let balance = Balances::free_balance(&addr); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 70_000, + GAS_LIMIT, + wasm.clone(), + ::Balance::from(100_000u32).encode(), /* rent allowance */ + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance; + let balance = Balances::free_balance(&addr); - let subsistence_threshold = Pallet::::subsistence_threshold(); + let subsistence_threshold = Pallet::::subsistence_threshold(); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert_eq!( - ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, - allowance, - ); - assert_eq!(Balances::free_balance(&addr), balance); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert_eq!( + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, + allowance, + ); + assert_eq!(Balances::free_balance(&addr), balance); - // Advance blocks - initialize_block(27); + // Advance blocks + initialize_block(27); - // Trigger rent through call (should remove the contract) - assert!(trigger_call(addr.clone())); - assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(&addr), subsistence_threshold); + // Trigger rent through call (should remove the contract) + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - // Advance blocks - initialize_block(30); + // Advance blocks + initialize_block(30); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - }); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); + }); // Allowance exceeded - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm.clone(), - ::Balance::from(70_000u32).encode(), // rent allowance - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let allowance = ContractInfoOf::::get(&addr) - .unwrap().get_alive().unwrap().rent_allowance; - let balance = Balances::free_balance(&addr); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + 
assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm.clone(), + ::Balance::from(70_000u32).encode(), // rent allowance + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance; + let balance = Balances::free_balance(&addr); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert_eq!( - ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - allowance, - ); - assert_eq!(Balances::free_balance(&addr), balance); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert_eq!( + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, + allowance, + ); + assert_eq!(Balances::free_balance(&addr), balance); - // Advance blocks - initialize_block(27); + // Advance blocks + initialize_block(27); - // Trigger rent through call - assert!(trigger_call(addr.clone())); - assert!(ContractInfoOf::::get(&addr) - .unwrap() - .get_tombstone() - .is_some()); - // Balance should be initial balance - initial rent_allowance - assert_eq!(Balances::free_balance(&addr), 30_000); + // Trigger rent through call + assert!(trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + // Balance should be initial balance - initial rent_allowance + assert_eq!(Balances::free_balance(&addr), 30_000); - // Advance blocks - initialize_block(20); + // Advance blocks + initialize_block(20); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert!(ContractInfoOf::::get(&addr) - .unwrap() - .get_tombstone() - .is_some()); - assert_eq!(Balances::free_balance(&addr), 30_000); - }); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + assert_eq!(Balances::free_balance(&addr), 30_000); + }); // Balance reached and inferior to subsistence threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let subsistence_threshold = Pallet::::subsistence_threshold(); - let _ = Balances::deposit_creating(&ALICE, subsistence_threshold * 1000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence_threshold * 100, - GAS_LIMIT, - wasm, - (subsistence_threshold * 100).encode(), // rent allowance - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let allowance = ContractInfoOf::::get(&addr) - .unwrap().get_alive().unwrap().rent_allowance; - let balance = Balances::free_balance(&addr); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let subsistence_threshold = Pallet::::subsistence_threshold(); + let _ = Balances::deposit_creating(&ALICE, subsistence_threshold * 1000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence_threshold * 100, + GAS_LIMIT, + wasm, + (subsistence_threshold * 100).encode(), // rent allowance + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let allowance = + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance; + let balance = Balances::free_balance(&addr); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert_eq!( - ContractInfoOf::::get(&addr) - .unwrap() - .get_alive() - .unwrap() - .rent_allowance, - 
allowance, - ); - assert_eq!( - Balances::free_balance(&addr), - balance, - ); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert_eq!( + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap().rent_allowance, + allowance, + ); + assert_eq!(Balances::free_balance(&addr), balance,); - // Make contract have exactly the subsistence threshold - Balances::make_free_balance_be(&addr, subsistence_threshold); - assert_eq!(Balances::free_balance(&addr), subsistence_threshold); + // Make contract have exactly the subsistence threshold + Balances::make_free_balance_be(&addr, subsistence_threshold); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - // Advance blocks (should remove as balance is exactly subsistence) - initialize_block(10); + // Advance blocks (should remove as balance is exactly subsistence) + initialize_block(10); - // Trigger rent through call - assert!(trigger_call(addr.clone())); - assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(&addr), subsistence_threshold); + // Trigger rent through call + assert!(trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - // Advance blocks - initialize_block(20); + // Advance blocks + initialize_block(20); - // Trigger rent must have no effect - assert!(!trigger_call(addr.clone())); - assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); - assert_eq!(Balances::free_balance(&addr), subsistence_threshold); - }); + // Trigger rent must have no effect + assert!(!trigger_call(addr.clone())); + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Tombstone(_))); + assert_eq!(Balances::free_balance(&addr), subsistence_threshold); + }); } #[test] @@ -1088,97 +1022,99 @@ fn call_removed_contract() { let (wasm, code_hash) = compile_module::("set_rent").unwrap(); // Balance reached and superior to subsistence threshold - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - // rent allowance - ::Balance::from(10_000u32).encode(), - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + // rent allowance + ::Balance::from(10_000u32).encode(), + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Calling contract should succeed. - assert_ok!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) - ); + // Calling contract should succeed. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::null() + )); - // Advance blocks - initialize_block(27); + // Advance blocks + initialize_block(27); - // Calling contract should deny access because rent cannot be paid. - assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), - Error::::RentNotPaid, - ); - // No event is generated because the contract is not actually removed. 
- assert_eq!(System::events(), vec![]); + // Calling contract should deny access because rent cannot be paid. + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), + Error::::RentNotPaid, + ); + // No event is generated because the contract is not actually removed. + assert_eq!(System::events(), vec![]); - // Subsequent contract calls should also fail. - assert_err_ignore_postinfo!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), - Error::::RentNotPaid, - ); + // Subsequent contract calls should also fail. + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()), + Error::::RentNotPaid, + ); - // A snitch can now remove the contract - assert_ok!(Contracts::claim_surcharge(Origin::none(), addr.clone(), Some(ALICE))); - assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); - }) + // A snitch can now remove the contract + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr.clone(), Some(ALICE))); + assert!(ContractInfoOf::::get(&addr).unwrap().get_tombstone().is_some()); + }) } #[test] fn default_rent_allowance_on_instantiate() { let (wasm, code_hash) = compile_module::("check_default_rent_allowance").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - let code_len: BalanceOf = + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + let code_len: BalanceOf = PrefabWasmModule::::from_storage_noinstr(contract.code_hash) .unwrap() .occupied_storage() .into(); - // The instantiation deducted the rent for one block immediately - let first_rent = ::RentFraction::get() + // The instantiation deducted the rent for one block immediately + let first_rent = ::RentFraction::get() // (base_deposit(8) + code_len) * byte_price - free_balance .mul_ceil((8 + code_len) * 10_000 - 30_000) // blocks to rent * 1; - assert_eq!(contract.rent_allowance, >::max_value() - first_rent); + assert_eq!(contract.rent_allowance, >::max_value() - first_rent); - // Advance blocks - initialize_block(5); + // Advance blocks + initialize_block(5); - // Trigger rent through call - assert_ok!( - Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, call::null()) - ); + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT, + call::null() + )); - // Check contract is still alive - let contract = ContractInfoOf::::get(&addr).unwrap().get_alive(); - assert!(contract.is_some()) - }); + // Check contract is still alive + let contract = ContractInfoOf::::get(&addr).unwrap().get_alive(); + assert!(contract.is_some()) + }); } #[test] @@ -1209,92 +1145,84 @@ fn restoration_success() { fn restoration( test_different_storage: bool, 
	test_restore_to_with_dirty_storage: bool,
-	test_code_evicted: bool
+	test_code_evicted: bool,
) {
	let (set_rent_wasm, set_rent_code_hash) = compile_module::<Test>("set_rent").unwrap();
	let (restoration_wasm, restoration_code_hash) = compile_module::<Test>("restoration").unwrap();
	let allowance: <Test as pallet_balances::Config>::Balance = 10_000;

-	ExtBuilder::default()
-		.existential_deposit(50)
-		.build()
-		.execute_with(|| {
-			let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+	ExtBuilder::default().existential_deposit(50).build().execute_with(|| {
+		let _ = Balances::deposit_creating(&ALICE, 1_000_000);
+
+		// Create an account with address `BOB` with code `CODE_SET_RENT`.
+		// The input parameter sets the rent allowance to 0.
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(ALICE),
+			30_000,
+			GAS_LIMIT,
+			set_rent_wasm.clone(),
+			allowance.encode(),
+			vec![],
+		));
+		let addr_bob = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[]);

-			// Create an account with address `BOB` with code `CODE_SET_RENT`.
-			// The input parameter sets the rent allowance to 0.
+		let mut events = vec![
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::System(frame_system::Event::NewAccount(ALICE)),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::Balances(pallet_balances::Event::Endowed(ALICE, 1_000_000)),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::System(frame_system::Event::NewAccount(addr_bob.clone())),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::Balances(pallet_balances::Event::Endowed(addr_bob.clone(), 30_000)),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::Balances(pallet_balances::Event::Transfer(
+					ALICE,
+					addr_bob.clone(),
+					30_000,
+				)),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::Contracts(crate::Event::CodeStored(set_rent_code_hash.into())),
+				topics: vec![],
+			},
+			EventRecord {
+				phase: Phase::Initialization,
+				event: Event::Contracts(crate::Event::Instantiated(ALICE, addr_bob.clone())),
+				topics: vec![],
+			},
+		];
+
+		// Create another contract from the same code in order to increment the code's
+		// refcounter so that it stays on chain.
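		// (Editorial aside: the refcount bookkeeping this test leans on is
		// asserted below with `assert_refcount!` — two live instantiations of
		// the same code give a count of 2, evicting one contract drops it to 1,
		// and once the count reaches 0 the code itself is removed from storage,
		// as the `CodeRemoved` event later confirms.)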
+ if !test_code_evicted { assert_ok!(Contracts::instantiate_with_code( Origin::signed(ALICE), - 30_000, + 20_000, GAS_LIMIT, - set_rent_wasm.clone(), + set_rent_wasm, allowance.encode(), - vec![], + vec![1], )); - let addr_bob = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[]); - - let mut events = vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount(ALICE)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Endowed(ALICE, 1_000_000) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount(addr_bob.clone())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Endowed(addr_bob.clone(), 30_000) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Transfer(ALICE, addr_bob.clone(), 30_000) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::CodeStored(set_rent_code_hash.into()) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::Instantiated(ALICE, addr_bob.clone()) - ), - topics: vec![], - }, - ]; - - // Create another contract from the same code in order to increment the codes - // refcounter so that it stays on chain. - if !test_code_evicted { - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 20_000, - GAS_LIMIT, - set_rent_wasm, - allowance.encode(), - vec![1], - )); - assert_refcount!(set_rent_code_hash, 2); - let addr_dummy = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[1]); - events.extend([ + assert_refcount!(set_rent_code_hash, 2); + let addr_dummy = Contracts::contract_address(&ALICE, &set_rent_code_hash, &[1]); + events.extend( + [ EventRecord { phase: Phase::Initialization, event: Event::System(frame_system::Event::NewAccount(addr_dummy.clone())), @@ -1302,146 +1230,144 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Endowed(addr_dummy.clone(), 20_000) - ), + event: Event::Balances(pallet_balances::Event::Endowed( + addr_dummy.clone(), + 20_000, + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Transfer(ALICE, addr_dummy.clone(), 20_000) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + ALICE, + addr_dummy.clone(), + 20_000, + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::Instantiated(ALICE, addr_dummy.clone()) - ), + event: Event::Contracts(crate::Event::Instantiated( + ALICE, + addr_dummy.clone(), + )), topics: vec![], }, - ].iter().cloned()); - } - - assert_eq!(System::events(), events); + ] + .iter() + .cloned(), + ); + } - // Check if `BOB` was created successfully and that the rent allowance is below what - // we specified as the first rent was already collected. 
- let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap().get_alive().unwrap(); - assert!(bob_contract.rent_allowance < allowance); - - if test_different_storage { - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr_bob.clone(), 0, GAS_LIMIT, - call::set_storage_4_byte()) - ); - } + assert_eq!(System::events(), events); - // Advance blocks in order to make the contract run out of money for rent. - initialize_block(27); - - // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 20_000 - // we expect that it is no longer callable but keeps existing until someone - // calls `claim_surcharge`. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), addr_bob.clone(), 0, GAS_LIMIT, call::null() - ), - Error::::RentNotPaid, - ); - assert!(System::events().is_empty()); - assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_alive().is_some()); - assert_ok!(Contracts::claim_surcharge(Origin::none(), addr_bob.clone(), Some(ALICE))); - assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); - if test_code_evicted { - assert_refcount!(set_rent_code_hash, 0); - } else { - assert_refcount!(set_rent_code_hash, 1); - } + // Check if `BOB` was created successfully and that the rent allowance is below what + // we specified as the first rent was already collected. + let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap().get_alive().unwrap(); + assert!(bob_contract.rent_allowance < allowance); - // Create another account with the address `DJANGO` with `CODE_RESTORATION`. - // - // Note that we can't use `ALICE` for creating `DJANGO` so we create yet another - // account `CHARLIE` and create `DJANGO` with it. - let _ = Balances::deposit_creating(&CHARLIE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(CHARLIE), - 30_000, + if test_different_storage { + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr_bob.clone(), + 0, GAS_LIMIT, - restoration_wasm, - vec![], - vec![], + call::set_storage_4_byte() )); - let addr_django = Contracts::contract_address(&CHARLIE, &restoration_code_hash, &[]); + } - // Before performing a call to `DJANGO` save its original trie id. - let django_trie_id = ContractInfoOf::::get(&addr_django).unwrap() - .get_alive().unwrap().trie_id; + // Advance blocks in order to make the contract run out of money for rent. + initialize_block(27); - // The trie is regarded as 'dirty' when it was written to in the current block. - if !test_restore_to_with_dirty_storage { - // Advance 1 block. - initialize_block(28); - } + // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 20_000 + // we expect that it is no longer callable but keeps existing until someone + // calls `claim_surcharge`. + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr_bob.clone(), 0, GAS_LIMIT, call::null()), + Error::::RentNotPaid, + ); + assert!(System::events().is_empty()); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_alive().is_some()); + assert_ok!(Contracts::claim_surcharge(Origin::none(), addr_bob.clone(), Some(ALICE))); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); + if test_code_evicted { + assert_refcount!(set_rent_code_hash, 0); + } else { + assert_refcount!(set_rent_code_hash, 1); + } - // Perform a call to `DJANGO`. This should either perform restoration successfully or - // fail depending on the test parameters. 
-			let perform_the_restoration = || {
-				Contracts::call(
-					Origin::signed(ALICE),
-					addr_django.clone(),
-					0,
-					GAS_LIMIT,
-					set_rent_code_hash
-						.as_ref()
-						.iter()
-						.chain(AsRef::<[u8]>::as_ref(&addr_bob))
-						.cloned()
-						.collect(),
-				)
-			};
+		// Create another account with the address `DJANGO` with `CODE_RESTORATION`.
+		//
+		// Note that we can't use `ALICE` for creating `DJANGO` so we create yet another
+		// account `CHARLIE` and create `DJANGO` with it.
+		let _ = Balances::deposit_creating(&CHARLIE, 1_000_000);
+		assert_ok!(Contracts::instantiate_with_code(
+			Origin::signed(CHARLIE),
+			30_000,
+			GAS_LIMIT,
+			restoration_wasm,
+			vec![],
+			vec![],
+		));
+		let addr_django = Contracts::contract_address(&CHARLIE, &restoration_code_hash, &[]);

-			// The key that is used in the restorer contract but is not in the target contract.
-			// Is supplied as delta to the restoration. We need it to check whether the key
-			// is properly removed on success but still there on failure.
-			let delta_key = {
-				let mut key = [0u8; 32];
-				key[0] = 1;
-				key
-			};
+		// Before performing a call to `DJANGO` save its original trie id.
+		let django_trie_id =
+			ContractInfoOf::<Test>::get(&addr_django).unwrap().get_alive().unwrap().trie_id;

-			if test_different_storage || test_restore_to_with_dirty_storage || test_code_evicted {
-				// Parametrization of the test imply restoration failure. Check that `DJANGO` aka
-				// restoration contract is still in place and also that `BOB` doesn't exist.
-				let result = perform_the_restoration();
-				assert!(ContractInfoOf::<Test>::get(&addr_bob).unwrap().get_tombstone().is_some());
-				let django_contract = ContractInfoOf::<Test>::get(&addr_django).unwrap()
-					.get_alive().unwrap();
-				assert_eq!(django_contract.storage_size, 8);
-				assert_eq!(django_contract.trie_id, django_trie_id);
-				assert_eq!(django_contract.deduct_block, System::block_number());
-				assert_eq!(
-					Storage::<Test>::read(&django_trie_id, &delta_key),
-					Some(vec![40, 0, 0, 0]),
-				);
-				match (
-					test_different_storage,
-					test_restore_to_with_dirty_storage,
-					test_code_evicted
-				) {
-					(true, false, false) => {
-						assert_err_ignore_postinfo!(
-							result, Error::<Test>::InvalidTombstone,
-						);
-						assert_eq!(System::events(), vec![]);
-					}
-					(_, true, false) => {
-						assert_err_ignore_postinfo!(
-							result, Error::<Test>::InvalidContractOrigin,
-						);
-						assert_eq!(System::events(), vec![
+		// The trie is regarded as 'dirty' when it was written to in the current block.
+		if !test_restore_to_with_dirty_storage {
+			// Advance 1 block.
+			initialize_block(28);
+		}
+
+		// Perform a call to `DJANGO`. This should either perform restoration successfully or
+		// fail depending on the test parameters.
+		let perform_the_restoration = || {
+			Contracts::call(
+				Origin::signed(ALICE),
+				addr_django.clone(),
+				0,
+				GAS_LIMIT,
+				set_rent_code_hash
+					.as_ref()
+					.iter()
+					.chain(AsRef::<[u8]>::as_ref(&addr_bob))
+					.cloned()
+					.collect(),
+			)
+		};
+
+		// The key that is used in the restorer contract but is not in the target contract.
+		// Is supplied as delta to the restoration. We need it to check whether the key
+		// is properly removed on success but still there on failure.
+		let delta_key = {
+			let mut key = [0u8; 32];
+			key[0] = 1;
+			key
+		};
+
+		if test_different_storage || test_restore_to_with_dirty_storage || test_code_evicted {
+			// Parametrization of the test implies restoration failure. Check that `DJANGO` aka
+			// restoration contract is still in place and also that `BOB` doesn't exist.
+ let result = perform_the_restoration(); + assert!(ContractInfoOf::::get(&addr_bob).unwrap().get_tombstone().is_some()); + let django_contract = + ContractInfoOf::::get(&addr_django).unwrap().get_alive().unwrap(); + assert_eq!(django_contract.storage_size, 8); + assert_eq!(django_contract.trie_id, django_trie_id); + assert_eq!(django_contract.deduct_block, System::block_number()); + assert_eq!(Storage::::read(&django_trie_id, &delta_key), Some(vec![40, 0, 0, 0]),); + match (test_different_storage, test_restore_to_with_dirty_storage, test_code_evicted) { + (true, false, false) => { + assert_err_ignore_postinfo!(result, Error::::InvalidTombstone,); + assert_eq!(System::events(), vec![]); + }, + (_, true, false) => { + assert_err_ignore_postinfo!(result, Error::::InvalidContractOrigin,); + assert_eq!( + System::events(), + vec![ EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::Evicted(addr_bob)), @@ -1454,67 +1380,76 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed(CHARLIE, 1_000_000)), + event: Event::Balances(pallet_balances::Event::Endowed( + CHARLIE, 1_000_000 + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::System(frame_system::Event::NewAccount(addr_django.clone())), + event: Event::System(frame_system::Event::NewAccount( + addr_django.clone() + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances(pallet_balances::Event::Endowed(addr_django.clone(), 30_000)), + event: Event::Balances(pallet_balances::Event::Endowed( + addr_django.clone(), + 30_000 + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Transfer(CHARLIE, addr_django.clone(), 30_000) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + CHARLIE, + addr_django.clone(), + 30_000 + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::CodeStored(restoration_code_hash) - ), + event: Event::Contracts(crate::Event::CodeStored( + restoration_code_hash + )), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::Instantiated(CHARLIE, addr_django.clone()) - ), + event: Event::Contracts(crate::Event::Instantiated( + CHARLIE, + addr_django.clone() + )), topics: vec![], }, + ] + ); + }, + (false, false, true) => { + assert_err_ignore_postinfo!(result, Error::::CodeNotFound,); + assert_refcount!(set_rent_code_hash, 0); + assert_eq!(System::events(), vec![]); + }, + _ => unreachable!(), + } + } else { + assert_ok!(perform_the_restoration()); + assert_refcount!(set_rent_code_hash, 2); - ]); - }, - (false, false, true) => { - assert_err_ignore_postinfo!( - result, Error::::CodeNotFound, - ); - assert_refcount!(set_rent_code_hash, 0); - assert_eq!(System::events(), vec![]); - }, - _ => unreachable!(), - } - } else { - assert_ok!(perform_the_restoration()); - assert_refcount!(set_rent_code_hash, 2); - - // Here we expect that the restoration is succeeded. Check that the restoration - // contract `DJANGO` ceased to exist and that `BOB` returned back. 
- let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap() - .get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 50); - assert_eq!(bob_contract.storage_size, 4); - assert_eq!(bob_contract.trie_id, django_trie_id); - assert_eq!(bob_contract.deduct_block, System::block_number()); - assert!(ContractInfoOf::::get(&addr_django).is_none()); - assert_matches!(Storage::::read(&django_trie_id, &delta_key), None); - assert_eq!(System::events(), vec![ + // Here we expect the restoration to succeed. Check that the restoration + // contract `DJANGO` ceased to exist and that `BOB` returned back. + let bob_contract = ContractInfoOf::::get(&addr_bob).unwrap().get_alive().unwrap(); + assert_eq!(bob_contract.rent_allowance, 50); + assert_eq!(bob_contract.storage_size, 4); + assert_eq!(bob_contract.trie_id, django_trie_id); + assert_eq!(bob_contract.deduct_block, System::block_number()); + assert!(ContractInfoOf::::get(&addr_django).is_none()); + assert_matches!(Storage::::read(&django_trie_id, &delta_key), None); + assert_eq!( + System::events(), + vec![ EventRecord { phase: Phase::Initialization, event: Event::Contracts(crate::Event::CodeRemoved(restoration_code_hash)), @@ -1527,60 +1462,59 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::Restored( - addr_django, addr_bob, bob_contract.code_hash, 50 - ) - ), + event: Event::Contracts(crate::Event::Restored( + addr_django, + addr_bob, + bob_contract.code_hash, + 50 + )), topics: vec![], }, - ]); - } - }); + ] + ); + } + }); } #[test] fn storage_max_value_limit() { let (wasm, code_hash) = compile_module::("storage_size").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ContractInfoOf::::get(&addr).unwrap().get_alive().unwrap(); - // Call contract with allowed storage value. - assert_ok!(Contracts::call( + // Call contract with allowed storage value. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr.clone(), + 0, + GAS_LIMIT * 2, // we are copying a huge buffer + ::Schedule::get().limits.payload_len.encode(), + )); + + // Call contract with too large a storage value. + assert_err_ignore_postinfo!( + Contracts::call( Origin::signed(ALICE), - addr.clone(), + addr, 0, - GAS_LIMIT * 2, // we are copying a huge buffer - ::Schedule::get().limits.payload_len.encode(), - )); - - // Call contract with too large a storage value.
- assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - (::Schedule::get().limits.payload_len + 1).encode(), - ), - Error::::ValueTooLarge, - ); - }); + GAS_LIMIT, + (::Schedule::get().limits.payload_len + 1).encode(), + ), + Error::::ValueTooLarge, + ); + }); } #[test] @@ -1588,187 +1522,145 @@ fn deploy_and_call_other_contract() { let (callee_wasm, callee_code_hash) = compile_module::("return_with_data").unwrap(); let (caller_wasm, caller_code_hash) = compile_module::("caller_contract").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - caller_wasm, - vec![], - vec![], - )); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - callee_wasm, - 0u32.to_le_bytes().encode(), - vec![42], - )); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + caller_wasm, + vec![], + vec![], + )); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + callee_wasm, + 0u32.to_le_bytes().encode(), + vec![42], + )); - // Call BOB contract, which attempts to instantiate and call the callee contract and - // makes various assertions on the results from those calls. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - Contracts::contract_address(&ALICE, &caller_code_hash, &[]), - 0, - GAS_LIMIT, - callee_code_hash.as_ref().to_vec(), - )); - }); + // Call BOB contract, which attempts to instantiate and call the callee contract and + // makes various assertions on the results from those calls. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + Contracts::contract_address(&ALICE, &caller_code_hash, &[]), + 0, + GAS_LIMIT, + callee_code_hash.as_ref().to_vec(), + )); + }); } #[test] fn cannot_self_destruct_through_draning() { let (wasm, code_hash) = compile_module::("drain").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(&addr), - Some(ContractInfo::Alive(_)) - ); + // Check that the BOB contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_))); - // Call BOB which makes it send all funds to the zero address - // The contract code asserts that the correct error value is returned. 
- assert_ok!( - Contracts::call( - Origin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - vec![], - ) - ); - }); + // Call BOB which makes it send all funds to the zero address. + // The contract code asserts that the correct error value is returned. + assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, vec![],)); + }); } #[test] fn cannot_self_destruct_while_live() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(&addr), - Some(ContractInfo::Alive(_)) - ); + // Check that the BOB contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_))); - // Call BOB with input data, forcing it make a recursive call to itself to - // self-destruct, resulting in a trap. - assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![0], - ), - Error::::ContractTrapped, - ); + // Call BOB with input data, forcing it to make a recursive call to itself to + // self-destruct, resulting in a trap. + assert_err_ignore_postinfo!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![0],), + Error::::ContractTrapped, + ); - // Check that BOB is still alive. - assert_matches!( - ContractInfoOf::::get(&addr), - Some(ContractInfo::Alive(_)) - ); - }); + // Check that BOB is still alive. + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_))); + }); } #[test] fn self_destruct_works() { let (wasm, code_hash) = compile_module::("self_destruct").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - let _ = Balances::deposit_creating(&DJANGO, 1_000_000); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let _ = Balances::deposit_creating(&DJANGO, 1_000_000); - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(&addr), - Some(ContractInfo::Alive(_)) - ); + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Drop all previous events - initialize_block(2); - - // Call BOB without input data which triggers termination.
- assert_matches!( - Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - ), - Ok(_) - ); + // Check that the BOB contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr), Some(ContractInfo::Alive(_))); + + // Drop all previous events + initialize_block(2); + + // Call BOB without input data which triggers termination. + assert_matches!( + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],), + Ok(_) + ); - // The call triggers rent collection that reduces the amount of balance - // that remains for the beneficiary. - let balance_after_rent = 93_078; + // The call triggers rent collection that reduces the amount of balance + // that remains for the beneficiary. + let balance_after_rent = 93_078; - pretty_assertions::assert_eq!(System::events(), vec![ + pretty_assertions::assert_eq!( + System::events(), + vec![ EventRecord { phase: Phase::Initialization, - event: Event::System( - frame_system::Event::KilledAccount(addr.clone()) - ), + event: Event::System(frame_system::Event::KilledAccount(addr.clone())), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Balances( - pallet_balances::Event::Transfer(addr.clone(), DJANGO, balance_after_rent) - ), + event: Event::Balances(pallet_balances::Event::Transfer( + addr.clone(), + DJANGO, + balance_after_rent + )), topics: vec![], }, EventRecord { @@ -1778,20 +1670,19 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts( - crate::Event::Terminated(addr.clone(), DJANGO) - ), + event: Event::Contracts(crate::Event::Terminated(addr.clone(), DJANGO)), topics: vec![], }, - ]); + ] + ); - // Check that account is gone - assert!(ContractInfoOf::::get(&addr).is_none()); + // Check that account is gone + assert!(ContractInfoOf::::get(&addr).is_none()); - // check that the beneficiary (django) got remaining balance - // some rent was deducted before termination - assert_eq!(Balances::free_balance(DJANGO), 1_000_000 + balance_after_rent); - }); + // check that the beneficiary (django) got remaining balance + // some rent was deducted before termination + assert_eq!(Balances::free_balance(DJANGO), 1_000_000 + balance_after_rent); + }); } // This tests that one contract cannot prevent another from self-destructing by sending it @@ -1801,134 +1692,116 @@ fn destroy_contract_and_transfer_funds() { let (callee_wasm, callee_code_hash) = compile_module::("self_destruct").unwrap(); let (caller_wasm, caller_code_hash) = compile_module::("destroy_and_transfer").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - // Create - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 200_000, - GAS_LIMIT, - callee_wasm, - vec![], - vec![42] - )); - - // This deploys the BOB contract, which in turn deploys the CHARLIE contract during - // construction. - assert_ok!(Contracts::instantiate_with_code( - Origin::signed(ALICE), - 200_000, - GAS_LIMIT, - caller_wasm, - callee_code_hash.as_ref().to_vec(), - vec![], - )); - let addr_bob = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); - let addr_charlie = Contracts::contract_address( - &addr_bob, &callee_code_hash, &[0x47, 0x11] - ); - - // Check that the CHARLIE contract has been instantiated. 
- assert_matches!( - ContractInfoOf::::get(&addr_charlie), - Some(ContractInfo::Alive(_)) - ); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + // Create + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 200_000, + GAS_LIMIT, + callee_wasm, + vec![], + vec![42] + )); - // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr_bob, - 0, - GAS_LIMIT, - addr_charlie.encode(), - )); + // This deploys the BOB contract, which in turn deploys the CHARLIE contract during + // construction. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 200_000, + GAS_LIMIT, + caller_wasm, + callee_code_hash.as_ref().to_vec(), + vec![], + )); + let addr_bob = Contracts::contract_address(&ALICE, &caller_code_hash, &[]); + let addr_charlie = Contracts::contract_address(&addr_bob, &callee_code_hash, &[0x47, 0x11]); - // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(ContractInfoOf::::get(&addr_charlie).is_none()); - }); -} + // Check that the CHARLIE contract has been instantiated. + assert_matches!(ContractInfoOf::::get(&addr_charlie), Some(ContractInfo::Alive(_))); -#[test] -fn cannot_self_destruct_in_constructor() { - let (wasm, _) = compile_module::("self_destructing_constructor").unwrap(); - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); - - // Fail to instantiate the BOB because the contructor calls seal_terminate. - assert_err_ignore_postinfo!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - 100_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - ), - Error::::TerminatedInConstructor, - ); - }); + // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + addr_bob, + 0, + GAS_LIMIT, + addr_charlie.encode(), + )); + + // Check that CHARLIE has moved on to the great beyond (i.e. died). + assert!(ContractInfoOf::::get(&addr_charlie).is_none()); + }); } #[test] -fn crypto_hashes() { - let (wasm, code_hash) = compile_module::("crypto_hashes").unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - let _ = Balances::deposit_creating(&ALICE, 1_000_000); +fn cannot_self_destruct_in_constructor() { + let (wasm, _) = compile_module::("self_destructing_constructor").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); - // Instantiate the CRYPTO_HASHES contract. - assert_ok!(Contracts::instantiate_with_code( + // Fail to instantiate the BOB because the constructor calls seal_terminate. + assert_err_ignore_postinfo!( + Contracts::instantiate_with_code( Origin::signed(ALICE), 100_000, GAS_LIMIT, wasm, vec![], vec![], - )); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // Perform the call. - let input = b"_DEAD_BEEF"; - use sp_io::hashing::*; - // Wraps a hash function into a more dynamic form usable for testing. - macro_rules! dyn_hash_fn { - ($name:ident) => { - Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) - }; - } - // All hash functions and their associated output byte lengths.
- let test_cases: &[(Box Box<[u8]>>, usize)] = &[ - (dyn_hash_fn!(sha2_256), 32), - (dyn_hash_fn!(keccak_256), 32), - (dyn_hash_fn!(blake2_256), 32), - (dyn_hash_fn!(blake2_128), 16), - ]; - // Test the given hash functions for the input: "_DEAD_BEEF" - for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { - // We offset data in the contract tables by 1. - let mut params = vec![(n + 1) as u8]; - params.extend_from_slice(input); - let result = >::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - params, - false, - ).result.unwrap(); - assert!(result.is_success()); - let expected = hash_fn(input.as_ref()); - assert_eq!(&result.data[..*expected_size], &*expected); - } - }) + ), + Error::::TerminatedInConstructor, + ); + }); +} + +#[test] +fn crypto_hashes() { + let (wasm, code_hash) = compile_module::("crypto_hashes").unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + + // Instantiate the CRYPTO_HASHES contract. + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 100_000, + GAS_LIMIT, + wasm, + vec![], + vec![], + )); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + // Perform the call. + let input = b"_DEAD_BEEF"; + use sp_io::hashing::*; + // Wraps a hash function into a more dynamic form usable for testing. + macro_rules! dyn_hash_fn { + ($name:ident) => { + Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) + }; + } + // All hash functions and their associated output byte lengths. + let test_cases: &[(Box Box<[u8]>>, usize)] = &[ + (dyn_hash_fn!(sha2_256), 32), + (dyn_hash_fn!(keccak_256), 32), + (dyn_hash_fn!(blake2_256), 32), + (dyn_hash_fn!(blake2_128), 16), + ]; + // Test the given hash functions for the input: "_DEAD_BEEF" + for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { + // We offset data in the contract tables by 1. + let mut params = vec![(n + 1) as u8]; + params.extend_from_slice(input); + let result = + >::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, params, false) + .result + .unwrap(); + assert!(result.is_success()); + let expected = hash_fn(input.as_ref()); + assert_eq!(&result.data[..*expected_size], &*expected); + } + }) } #[test] @@ -1938,28 +1811,21 @@ fn transfer_return_code() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - wasm, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. Balances::make_free_balance_be(&addr, subsistence); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![], - false, - ).result.unwrap(); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence @@ -1967,14 +1833,7 @@ fn transfer_return_code() { // the transfer still fails but with another return code. 
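// Aside: `dyn_hash_fn!` above erases the differing output types of the hash functions
// ([u8; 32] vs [u8; 16]) behind one `Box<dyn Fn(&[u8]) -> Box<[u8]>>` so they can share a
// single test-case table. The same pattern standalone, with toy stand-ins (NOT real
// hash functions) so the sketch has no dependencies:
fn toy_hash_32(input: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, b) in input.iter().enumerate() {
        out[i % 32] ^= b; // toy mixing only
    }
    out
}

fn toy_hash_16(input: &[u8]) -> [u8; 16] {
    let mut out = [0u8; 16];
    for (i, b) in input.iter().enumerate() {
        out[i % 16] ^= b;
    }
    out
}

// Wrap any `fn(&[u8]) -> [u8; N]` into a uniform boxed closure.
macro_rules! dyn_hash_fn {
    ($name:ident) => {
        Box::new(|input: &[u8]| $name(input).as_ref().to_vec().into_boxed_slice())
            as Box<dyn Fn(&[u8]) -> Box<[u8]>>
    };
}

fn main() {
    let test_cases: &[(Box<dyn Fn(&[u8]) -> Box<[u8]>>, usize)] =
        &[(dyn_hash_fn!(toy_hash_32), 32), (dyn_hash_fn!(toy_hash_16), 16)];
    for (hash_fn, expected_size) in test_cases {
        // Every entry now has the same type regardless of the output width.
        assert_eq!(hash_fn(b"_DEAD_BEEF").len(), *expected_size);
    }
}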
Balances::make_free_balance_be(&addr, subsistence + 100); Balances::reserve(&addr, subsistence + 100).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr, - 0, - GAS_LIMIT, - vec![], - false, - ).result.unwrap(); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], false).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1988,16 +1847,14 @@ fn call_return_code() { let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - caller_code, - vec![0], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![0], + vec![], + ),); let addr_bob = Contracts::contract_address(&ALICE, &caller_hash, &[]); Balances::make_free_balance_be(&addr_bob, subsistence); @@ -2009,19 +1866,19 @@ fn call_return_code() { GAS_LIMIT, AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(CHARLIE), - subsistence * 100, - GAS_LIMIT, - callee_code, - vec![0], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(CHARLIE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![0], + vec![], + ),); let addr_django = Contracts::contract_address(&CHARLIE, &callee_hash, &[]); Balances::make_free_balance_be(&addr_django, subsistence); @@ -2031,9 +1888,15 @@ fn call_return_code() { addr_bob.clone(), 0, GAS_LIMIT, - AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence @@ -2046,9 +1909,15 @@ fn call_return_code() { addr_bob.clone(), 0, GAS_LIMIT, - AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&0u32.to_le_bytes()) + .cloned() + .collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. @@ -2058,9 +1927,15 @@ fn call_return_code() { addr_bob.clone(), 0, GAS_LIMIT, - AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&1u32.to_le_bytes()).cloned().collect(), + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&1u32.to_le_bytes()) + .cloned() + .collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
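// Aside: the return-code tests above build their call input by appending a little-endian
// u32 selector to the callee address, e.g.
// `AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect()`.
// The same construction as a standalone helper:
fn encode_call_input(callee: &[u8], selector: u32) -> Vec<u8> {
    // Address bytes first, then the 4 selector bytes in little-endian order.
    callee.iter().chain(&selector.to_le_bytes()).cloned().collect()
}

fn main() {
    let addr = [7u8; 32]; // a 32-byte account id, as in the tests
    let input = encode_call_input(&addr, 2); // "2" makes the callee trap in the fixture
    assert_eq!(input.len(), 32 + 4);
    assert_eq!(&input[32..], &[2, 0, 0, 0]);
}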
@@ -2069,11 +1944,16 @@ fn call_return_code() { addr_bob, 0, GAS_LIMIT, - AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&2u32.to_le_bytes()).cloned().collect(), + AsRef::<[u8]>::as_ref(&addr_django) + .iter() + .chain(&2u32.to_le_bytes()) + .cloned() + .collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); - }); } @@ -2087,39 +1967,31 @@ fn instantiate_return_code() { let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); let callee_hash = callee_hash.as_ref().to_vec(); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - callee_code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![], + ),); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - caller_code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &caller_hash, &[]); // Contract has only the minimal balance so any transfer will return BelowSubsistence. Balances::make_free_balance_be(&addr, subsistence); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - callee_hash.clone(), - false, - ).result.unwrap(); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, callee_hash.clone(), false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence @@ -2127,26 +1999,17 @@ fn instantiate_return_code() { // the transfer still fails but with another return code. Balances::make_free_balance_be(&addr, subsistence + 10_000); Balances::reserve(&addr, subsistence + 10_000).unwrap(); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - callee_hash.clone(), - false, - ).result.unwrap(); + let result = + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, callee_hash.clone(), false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid Balances::make_free_balance_be(&addr, subsistence + 10_000); - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![0; 33], - false, - ).result.unwrap(); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![0; 33], false) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. @@ -2157,7 +2020,9 @@ fn instantiate_return_code() { GAS_LIMIT, callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. 
@@ -2168,9 +2033,10 @@ fn instantiate_return_code() { GAS_LIMIT, callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), false, - ).result.unwrap(); + ) + .result + .unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); - }); } @@ -2201,26 +2067,18 @@ fn disabled_chain_extension_errors_on_call() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); TestExtension::disable(); assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - ), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],), Error::::NoChainExtension, ); }); @@ -2232,16 +2090,14 @@ fn chain_extension_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); // The contract takes up to a 2 byte buffer where the first byte passed is used as @@ -2249,51 +2105,27 @@ // func_id.
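// Aside: a standalone model of the func_id dispatch exercised below; the outcome names
// mirror the test's comments and are invented for illustration, not pallet types:
#[derive(Debug, PartialEq)]
enum ExtOutcome {
    Echo(Vec<u8>),    // 0: pass the input buffer through as output
    StoredInts,       // 1: treat inputs as integer primitives and store them
    ExtraWeight(u64), // 2: charge extra weight given in the second byte
    Diverted(Vec<u8>),// 3: set the REVERT flag and return a fixed buffer
}

fn dispatch(input: &[u8]) -> ExtOutcome {
    // The first byte selects the chain extension function.
    match input[0] {
        0 => ExtOutcome::Echo(input.to_vec()),
        1 => ExtOutcome::StoredInts,
        2 => ExtOutcome::ExtraWeight(input[1] as u64),
        3 => ExtOutcome::Diverted(vec![42, 99]),
        _ => unreachable!("unknown func_id"),
    }
}

fn main() {
    assert_eq!(dispatch(&[0, 99]), ExtOutcome::Echo(vec![0, 99]));
    assert_eq!(dispatch(&[2, 42]), ExtOutcome::ExtraWeight(42));
    assert_eq!(dispatch(&[3]), ExtOutcome::Diverted(vec![42, 99]));
}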
// 0 = read input buffer and pass it through as output - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![0, 99], - false, - ); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![0, 99], false); let gas_consumed = result.gas_consumed; assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); assert_eq!(result.result.unwrap().data, Bytes(vec![0, 99])); // 1 = treat inputs as integer primitives and store the supplied integers - Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![1], - false, - ).result.unwrap(); + Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![1], false) + .result + .unwrap(); // those values passed in the fixture assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); // 2 = charge some extra weight (amount supplied in second byte) - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![2, 42], - false, - ); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![2, 42], false); assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 42); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - vec![3], - false, - ).result.unwrap(); + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![3], false) + .result + .unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); assert_eq!(result.data, Bytes(vec![42, 99])); }); @@ -2306,32 +2138,24 @@ fn lazy_removal_works() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let info = >::get(&addr).unwrap().get_alive().unwrap(); + let info = >::get(&addr).unwrap().get_alive().unwrap(); let trie = &info.child_trie_info(); // Put value into the contracts child trie child::put(trie, &[99], &42); // Terminate the contract - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],)); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2355,10 +2179,9 @@ fn lazy_removal_partial_remove_works() { let extra_keys = 7u32; let weight_limit = 5_000_000_000; let (_, max_keys) = Storage::::deletion_budget(1, weight_limit); - let vals: Vec<_> = (0..max_keys + extra_keys).map(|i| { - (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) - }) - .collect(); + let vals: Vec<_> = (0..max_keys + extra_keys) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); let mut ext = ExtBuilder::default().existential_deposit(50).build(); @@ -2366,39 +2189,27 @@ fn lazy_removal_partial_remove_works() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, 
+ vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let mut info = >::get(&addr).unwrap().get_alive().unwrap(); + let mut info = >::get(&addr).unwrap().get_alive().unwrap(); // Put value into the contracts child trie for val in &vals { - Storage::::write( - System::block_number(), - &mut info, - &val.0, - Some(val.2.clone()), - ).unwrap(); - } - >::insert(&addr, ContractInfo::Alive(info.clone())); - - // Terminate the contract - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - )); + Storage::::write(System::block_number(), &mut info, &val.0, Some(val.2.clone())) + .unwrap(); + } + >::insert(&addr, ContractInfo::Alive(info.clone())); + + // Terminate the contract + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],)); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2449,46 +2260,33 @@ fn lazy_removal_does_no_run_on_full_block() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let mut info = >::get(&addr).unwrap().get_alive().unwrap(); + let mut info = >::get(&addr).unwrap().get_alive().unwrap(); let max_keys = 30; // Create some storage items for the contract. - let vals: Vec<_> = (0..max_keys).map(|i| { - (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) - }) - .collect(); + let vals: Vec<_> = (0..max_keys) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); // Put value into the contracts child trie for val in &vals { - Storage::::write( - System::block_number(), - &mut info, - &val.0, - Some(val.2.clone()), - ).unwrap(); + Storage::::write(System::block_number(), &mut info, &val.0, Some(val.2.clone())) + .unwrap(); } >::insert(&addr, ContractInfo::Alive(info.clone())); // Terminate the contract - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],)); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2527,7 +2325,6 @@ fn lazy_removal_does_no_run_on_full_block() { }); } - #[test] fn lazy_removal_does_not_use_all_weight() { let (code, hash) = compile_module::("self_destruct").unwrap(); @@ -2535,47 +2332,34 @@ fn lazy_removal_does_not_use_all_weight() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); - let mut info = >::get(&addr).unwrap().get_alive().unwrap(); + let mut info = >::get(&addr).unwrap().get_alive().unwrap(); let weight_limit = 5_000_000_000; let (weight_per_key, max_keys) = Storage::::deletion_budget(1, weight_limit); // We create a contract with one less storage item than we can remove within the limit - let vals: 
Vec<_> = (0..max_keys - 1).map(|i| { - (blake2_256(&i.encode()), (i as u32), (i as u32).encode()) - }) - .collect(); + let vals: Vec<_> = (0..max_keys - 1) + .map(|i| (blake2_256(&i.encode()), (i as u32), (i as u32).encode())) + .collect(); // Put value into the contracts child trie for val in &vals { - Storage::::write( - System::block_number(), - &mut info, - &val.0, - Some(val.2.clone()), - ).unwrap(); + Storage::::write(System::block_number(), &mut info, &val.0, Some(val.2.clone())) + .unwrap(); } >::insert(&addr, ContractInfo::Alive(info.clone())); // Terminate the contract - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],)); // Contract info should be gone assert!(!>::contains_key(&addr)); @@ -2607,16 +2391,14 @@ fn deletion_queue_full() { let subsistence = Pallet::::subsistence_threshold(); let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - code, - vec![], - vec![], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + code, + vec![], + vec![], + ),); let addr = Contracts::contract_address(&ALICE, &hash, &[]); @@ -2625,18 +2407,12 @@ fn deletion_queue_full() { // Terminate the contract should fail assert_err_ignore_postinfo!( - Contracts::call( - Origin::signed(ALICE), - addr.clone(), - 0, - GAS_LIMIT, - vec![], - ), + Contracts::call(Origin::signed(ALICE), addr.clone(), 0, GAS_LIMIT, vec![],), Error::::DeletionQueueFull, ); // Contract should be alive because removal failed - >::get(&addr).unwrap().get_alive().unwrap(); + >::get(&addr).unwrap().get_alive().unwrap(); // make the contract ripe for eviction initialize_block(5); @@ -2648,7 +2424,7 @@ fn deletion_queue_full() { ); // Contract should be alive because removal failed - >::get(&addr).unwrap().get_alive().unwrap(); + >::get(&addr).unwrap().get_alive().unwrap(); }); } @@ -2672,8 +2448,7 @@ fn not_deployed_if_endowment_too_low_for_first_rent() { 30_000, GAS_LIMIT, wasm, - (BalanceOf::::from(first_rent) - BalanceOf::::from(1u32)) - .encode(), // rent allowance + (BalanceOf::::from(first_rent) - BalanceOf::::from(1u32)).encode(), /* rent allowance */ vec![], ), Error::::NewContractNotFunded, @@ -2697,7 +2472,7 @@ fn surcharge_reward_is_capped() { vec![], )); let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let contract = >::get(&addr).unwrap().get_alive().unwrap(); + let contract = >::get(&addr).unwrap().get_alive().unwrap(); let balance = Balances::free_balance(&ALICE); let reward = ::SurchargeReward::get(); @@ -2768,13 +2543,7 @@ fn refcounter() { let addr2 = Contracts::contract_address(&ALICE, &code_hash, &[2]); // Terminating one contract should decrement the refcount - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr0, - 0, - GAS_LIMIT, - vec![], - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr0, 0, GAS_LIMIT, vec![],)); assert_refcount!(code_hash, 2); // make remaining contracts eligible for eviction @@ -2819,24 +2588,10 @@ fn reinstrument_does_charge() { // Call the contract two times without reinstrument - let result0 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - zero.clone(), - false, - ); + let result0 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false); assert!(result0.result.unwrap().is_success()); - 
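// Aside: the lazy-removal tests above hinge on a per-block deletion budget derived from a
// weight limit. A simplified standalone model of that bookkeeping (the real
// `deletion_budget` also accounts for base weights; the constants here are invented):
fn deletion_budget(weight_limit: u64, weight_per_key: u64) -> u64 {
    // How many child-trie keys fit into the block's weight allowance.
    weight_limit / weight_per_key
}

fn main() {
    let weight_limit = 5_000_000_000u64;
    let weight_per_key = 500_000_000u64; // invented figure for the sketch
    let max_keys = deletion_budget(weight_limit, weight_per_key);
    assert_eq!(max_keys, 10);

    // A contract with `extra_keys` more storage items than one block can purge
    // is only partially removed; the surplus is drained on a later block.
    let extra_keys = 7u64;
    let mut remaining = max_keys + extra_keys;
    remaining -= max_keys.min(remaining); // first on_initialize pass
    assert_eq!(remaining, extra_keys);
    remaining -= max_keys.min(remaining); // second pass finishes the job
    assert_eq!(remaining, 0);
}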
let result1 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - zero.clone(), - false, - ); + let result1 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false); assert!(result1.result.unwrap().is_success()); // They should match because both were called with the same schedule. @@ -2849,14 +2604,7 @@ fn reinstrument_does_charge() { }); // This call should trigger reinstrumentation - let result2 = Contracts::bare_call( - ALICE, - addr.clone(), - 0, - GAS_LIMIT, - zero.clone(), - false, - ); + let result2 = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, zero.clone(), false); assert!(result2.result.unwrap().is_success()); assert!(result2.gas_consumed > result1.gas_consumed); assert_eq!( @@ -2873,25 +2621,16 @@ fn debug_message_works() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - ), - ); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call( - ALICE, - addr, - 0, + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, GAS_LIMIT, + wasm, vec![], - true, - ); + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], true); assert_matches!(result.result, Ok(_)); assert_eq!(std::str::from_utf8(&result.debug_message).unwrap(), "Hello World!"); @@ -2905,35 +2644,20 @@ fn debug_message_logging_disabled() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - ), - ); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - // disable logging by passing `false` - let result = Contracts::bare_call( - ALICE, - addr.clone(), - 0, + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, GAS_LIMIT, + wasm, vec![], - false, - ); + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + // disable logging by passing `false` + let result = Contracts::bare_call(ALICE, addr.clone(), 0, GAS_LIMIT, vec![], false); assert_matches!(result.result, Ok(_)); // the dispatchables always run without debugging - assert_ok!(Contracts::call( - Origin::signed(ALICE), - addr, - 0, - GAS_LIMIT, - vec![], - )); + assert_ok!(Contracts::call(Origin::signed(ALICE), addr, 0, GAS_LIMIT, vec![],)); assert!(result.debug_message.is_empty()); }); } @@ -2945,25 +2669,16 @@ fn debug_message_invalid_utf8() { ExtBuilder::default().existential_deposit(50).build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - 30_000, - GAS_LIMIT, - wasm, - vec![], - vec![], - ), - ); - let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); - let result = Contracts::bare_call( - ALICE, - addr, - 0, + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + 30_000, GAS_LIMIT, + wasm, vec![], - true, - ); + vec![], + ),); + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + let result = Contracts::bare_call(ALICE, addr, 0, GAS_LIMIT, vec![], true); assert_err!(result.result, >::DebugMessageInvalidUTF8); }); } @@ -2977,28 +2692,24 @@
fn gas_estimation_nested_call_fixed_limit() { let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - caller_code, - vec![], - vec![0], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![0], + ),); let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - callee_code, - vec![], - vec![1], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![1], + ),); let addr_callee = Contracts::contract_address(&ALICE, &callee_hash, &[1]); let input: Vec = AsRef::<[u8]>::as_ref(&addr_callee) @@ -3008,27 +2719,16 @@ fn gas_estimation_nested_call_fixed_limit() { .collect(); // Call in order to determine the gas that is required for this call - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - input.clone(), - false, - ); + let result = + Contracts::bare_call(ALICE, addr_caller.clone(), 0, GAS_LIMIT, input.clone(), false); assert_ok!(&result.result); assert!(result.gas_required > result.gas_consumed); // Make the same call using the estimated gas. Should succeed. - assert_ok!(Contracts::bare_call( - ALICE, - addr_caller, - 0, - result.gas_required, - input, - false, - ).result); + assert_ok!( + Contracts::bare_call(ALICE, addr_caller, 0, result.gas_required, input, false,).result + ); }); } @@ -3042,53 +2742,39 @@ fn gas_estimation_call_runtime() { let _ = Balances::deposit_creating(&ALICE, 1000 * subsistence); let _ = Balances::deposit_creating(&CHARLIE, 1000 * subsistence); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - caller_code, - vec![], - vec![0], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + caller_code, + vec![], + vec![0], + ),); let addr_caller = Contracts::contract_address(&ALICE, &caller_hash, &[0]); - assert_ok!( - Contracts::instantiate_with_code( - Origin::signed(ALICE), - subsistence * 100, - GAS_LIMIT, - callee_code, - vec![], - vec![1], - ), - ); + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + callee_code, + vec![], + vec![1], + ),); let addr_callee = Contracts::contract_address(&ALICE, &callee_hash, &[1]); // Call something trivial with a huge gas limit so that we can observe the effects // of pre-charging. This should create a difference between consumed and required. let call = Call::Contracts(crate::Call::call(addr_callee, 0, GAS_LIMIT / 3, vec![])); - let result = Contracts::bare_call( - ALICE, - addr_caller.clone(), - 0, - GAS_LIMIT, - call.encode(), - false, - ); + let result = + Contracts::bare_call(ALICE, addr_caller.clone(), 0, GAS_LIMIT, call.encode(), false); assert_ok!(&result.result); assert!(result.gas_required > result.gas_consumed); // Make the same call using the required gas. Should succeed. 
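// Aside: both gas-estimation tests follow a two-phase protocol: dry-run via `bare_call`
// with a generous limit, then resubmit with the returned `gas_required` (which exceeds
// `gas_consumed` because of pre-charging). A sketch of the caller-side flow; the
// `DryRunResult` type and the numbers are invented for illustration:
struct DryRunResult {
    gas_consumed: u64,
    gas_required: u64,
}

fn estimate_then_execute<F>(dry_run: F) -> u64
where
    F: Fn(u64) -> DryRunResult,
{
    const GAS_LIMIT: u64 = 10_000_000_000;
    // Phase 1: dry-run with a generous limit.
    let estimate = dry_run(GAS_LIMIT);
    assert!(estimate.gas_required >= estimate.gas_consumed);
    // Phase 2: the estimate is the limit the real submission should use.
    estimate.gas_required
}

fn main() {
    // A toy dry-run: pretend pre-charging reserves 25% more than is consumed.
    let limit = estimate_then_execute(|_limit| DryRunResult {
        gas_consumed: 4_000,
        gas_required: 5_000,
    });
    assert_eq!(limit, 5_000);
}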
- assert_ok!(Contracts::bare_call( - ALICE, - addr_caller, - 0, - result.gas_required, - call.encode(), - false, - ).result); + assert_ok!( + Contracts::bare_call(ALICE, addr_caller, 0, result.gas_required, call.encode(), false,) + .result + ); }); } diff --git a/substrate/frame/contracts/src/wasm/code_cache.rs b/substrate/frame/contracts/src/wasm/code_cache.rs index a2aa2b55e16571b53788d98c6e0ef69a224f602d..06329a7e81ad99f88e4968f32b143d5a68e0d40b 100644 --- a/substrate/frame/contracts/src/wasm/code_cache.rs +++ b/substrate/frame/contracts/src/wasm/code_cache.rs @@ -27,16 +27,17 @@ //! this guarantees that no instrumented contract code in the cache can have a version equal to the current one. //! Thus, before executing a contract it should be reinstrumented with the new schedule. +#[cfg(feature = "runtime-benchmarks")] +pub use self::private::reinstrument; use crate::{ - CodeHash, CodeStorage, PristineCode, Schedule, Config, Error, Weight, - wasm::{prepare, PrefabWasmModule}, Pallet as Contracts, Event, gas::{GasMeter, Token}, + wasm::{prepare, PrefabWasmModule}, weights::WeightInfo, + CodeHash, CodeStorage, Config, Error, Event, Pallet as Contracts, PristineCode, Schedule, + Weight, }; -use sp_core::crypto::UncheckedFrom; use frame_support::dispatch::DispatchError; -#[cfg(feature = "runtime-benchmarks")] -pub use self::private::reinstrument as reinstrument; +use sp_core::crypto::UncheckedFrom; /// Put the instrumented module in storage. /// @@ -44,7 +45,7 @@ pub use self::private::reinstrument as reinstrument; /// under the specified `code_hash`. pub fn store(mut prefab_module: PrefabWasmModule) where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { let code_hash = sp_std::mem::take(&mut prefab_module.code_hash); @@ -53,14 +54,12 @@ where if let Some(code) = prefab_module.original_code.take() { >::insert(&code_hash, code); } - >::mutate(&code_hash, |existing| { - match existing { - Some(module) => increment_64(&mut module.refcount), - None => { - *existing = Some(prefab_module); - Contracts::::deposit_event(Event::CodeStored(code_hash)) - } - } + >::mutate(&code_hash, |existing| match existing { + Some(module) => increment_64(&mut module.refcount), + None => { + *existing = Some(prefab_module); + Contracts::::deposit_event(Event::CodeStored(code_hash)) + }, }); } @@ -69,7 +68,7 @@ where /// Removes the code instead of storing it when the refcount drops to zero. pub fn store_decremented(mut prefab_module: PrefabWasmModule) where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { prefab_module.refcount = prefab_module.refcount.saturating_sub(1); if prefab_module.refcount > 0 { @@ -81,10 +80,12 @@ } /// Increment the refcount of a code in-storage by one. -pub fn increment_refcount(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError> +pub fn increment_refcount( + code_hash: CodeHash, + gas_meter: &mut GasMeter, +) -> Result<(), DispatchError> where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { gas_meter.charge(CodeToken::UpdateRefcount(estimate_code_size::(&code_hash)?))?; >::mutate(code_hash, |existing| { @@ -98,10 +99,12 @@ } /// Decrement the refcount of a code in-storage by one and remove the code when it drops to zero.
-pub fn decrement_refcount(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError> +pub fn decrement_refcount( + code_hash: CodeHash, + gas_meter: &mut GasMeter, +) -> Result<(), DispatchError> where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { if let Ok(len) = estimate_code_size::(&code_hash) { gas_meter.charge(CodeToken::UpdateRefcount(len))?; @@ -133,7 +136,7 @@ pub fn load( mut reinstrument: Option<(&Schedule, &mut GasMeter)>, ) -> Result, DispatchError> where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { // The reinstrument case coincides with the cases where we need to charge extra // based upon the code size: On-chain execution. @@ -141,8 +144,8 @@ where gas_meter.charge(CodeToken::Load(estimate_code_size::(&code_hash)?))?; } - let mut prefab_module = >::get(code_hash) - .ok_or_else(|| Error::::CodeNotFound)?; + let mut prefab_module = + >::get(code_hash).ok_or_else(|| Error::::CodeNotFound)?; prefab_module.code_hash = code_hash; if let Some((schedule, gas_meter)) = reinstrument { @@ -165,7 +168,7 @@ mod private { schedule: &Schedule, ) -> Result<(), DispatchError> where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { let original_code = >::get(&prefab_module.code_hash) .ok_or_else(|| Error::::CodeNotFound)?; @@ -179,7 +182,7 @@ mod private { /// Finish removal of a code by deleting the pristine code and emitting an event. fn finish_removal(code_hash: CodeHash) where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { >::remove(code_hash); Contracts::::deposit_event(Event::CodeRemoved(code_hash)) @@ -190,13 +193,15 @@ where /// We try hard to be infallible here because otherwise more storage transactions would be /// necessary to account for failures in storing code for an already instantiated contract. fn increment_64(refcount: &mut u64) { - *refcount = refcount.checked_add(1).expect(" + *refcount = refcount.checked_add(1).expect( + " refcount is 64bit. Generating this overflow would require to store _at least_ 18 exabyte of data assuming that a contract consumes only one byte of data. Any node would run out of storage space before hitting this overflow. qed - "); + ", + ); } /// Get the size of the instrumented code stored at `code_hash` without loading it. @@ -206,7 +211,7 @@ fn increment_64(refcount: &mut u64) { /// compared to the code size. Additionally, charging too much weight is completely safe. fn estimate_code_size(code_hash: &CodeHash) -> Result where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { let key = >::hashed_key_for(code_hash); let mut data = [0u8; 0]; @@ -229,7 +234,7 @@ enum CodeToken { impl Token for CodeToken where T: Config, - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { fn weight(&self) -> Weight { use self::CodeToken::*; @@ -240,9 +245,10 @@ where // the contract. 
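// Aside: `store`/`store_decremented` above implement plain reference counting over the
// code cache: a known hash bumps the refcount, a new one is inserted (emitting
// `CodeStored`), and a decrement to zero removes the code (the pallet also drops the
// pristine code and emits `CodeRemoved`). A standalone model with a HashMap standing in
// for the `CodeStorage` map:
use std::collections::HashMap;

#[derive(Default)]
struct CodeCache {
    modules: HashMap<[u8; 32], u64>, // code_hash -> refcount
}

impl CodeCache {
    fn store(&mut self, code_hash: [u8; 32]) {
        // Mirrors the `mutate` above: increment if present, insert otherwise.
        *self.modules.entry(code_hash).or_insert(0) += 1;
    }

    fn decrement(&mut self, code_hash: [u8; 32]) {
        if let Some(count) = self.modules.get_mut(&code_hash) {
            *count = count.saturating_sub(1);
            if *count == 0 {
                // Removal is the point where the real pallet finishes cleanup.
                self.modules.remove(&code_hash);
            }
        }
    }
}

fn main() {
    let mut cache = CodeCache::default();
    let hash = [1u8; 32];
    cache.store(hash);
    cache.store(hash);
    cache.decrement(hash);
    assert_eq!(cache.modules[&hash], 1);
    cache.decrement(hash);
    assert!(!cache.modules.contains_key(&hash));
}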
match *self { Instrument(len) => T::WeightInfo::instrument(len / 1024), - Load(len) => T::WeightInfo::code_load(len / 1024).saturating_sub(T::WeightInfo::code_load(0)), - UpdateRefcount(len) => - T::WeightInfo::code_refcount(len / 1024).saturating_sub(T::WeightInfo::code_refcount(0)), + Load(len) => + T::WeightInfo::code_load(len / 1024).saturating_sub(T::WeightInfo::code_load(0)), + UpdateRefcount(len) => T::WeightInfo::code_refcount(len / 1024) + .saturating_sub(T::WeightInfo::code_refcount(0)), } } } diff --git a/substrate/frame/contracts/src/wasm/env_def/macros.rs b/substrate/frame/contracts/src/wasm/env_def/macros.rs index b7358f6aa234518dc27e6b72398d5e1e8d4162fe..8d316794c63961a6622108959f1e8032359c2c58 100644 --- a/substrate/frame/contracts/src/wasm/env_def/macros.rs +++ b/substrate/frame/contracts/src/wasm/env_def/macros.rs @@ -255,14 +255,14 @@ macro_rules! define_env { #[cfg(test)] mod tests { - use pwasm_utils::parity_wasm::elements::{FunctionType, ValueType}; - use sp_runtime::traits::Zero; - use sp_sandbox::{ReturnValue, Value}; use crate::{ - Weight, - wasm::{Runtime, runtime::TrapReason, tests::MockExt}, exec::Ext, + wasm::{runtime::TrapReason, tests::MockExt, Runtime}, + Weight, }; + use pwasm_utils::parity_wasm::elements::{FunctionType, ValueType}; + use sp_runtime::traits::Zero; + use sp_sandbox::{ReturnValue, Value}; struct TestRuntime { value: u32, @@ -333,16 +333,15 @@ mod tests { Err(TrapReason::Termination) } }); - let _f: fn(&mut Runtime, &[sp_sandbox::Value]) - -> Result = seal_gas::; + let _f: fn( + &mut Runtime, + &[sp_sandbox::Value], + ) -> Result = seal_gas::; } #[test] fn macro_gen_signature() { - assert_eq!( - gen_signature!((i32)), - FunctionType::new(vec![ValueType::I32], vec![]), - ); + assert_eq!(gen_signature!((i32)), FunctionType::new(vec![ValueType::I32], vec![]),); assert_eq!( gen_signature!( (i32, u32) -> u32 ), @@ -387,11 +386,11 @@ mod tests { }, ); - assert!( - Env::can_satisfy(b"seal0", b"seal_gas",&FunctionType::new(vec![ValueType::I32], vec![])) - ); - assert!( - !Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![])) - ); + assert!(Env::can_satisfy( + b"seal0", + b"seal_gas", + &FunctionType::new(vec![ValueType::I32], vec![]) + )); + assert!(!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![]))); } } diff --git a/substrate/frame/contracts/src/wasm/env_def/mod.rs b/substrate/frame/contracts/src/wasm/env_def/mod.rs index 5855befd34cb254abb685321d56e9756c7e57b2f..6a55677f69a01d3513677d2b8d03d6de2069d14f 100644 --- a/substrate/frame/contracts/src/wasm/env_def/mod.rs +++ b/substrate/frame/contracts/src/wasm/env_def/mod.rs @@ -18,8 +18,8 @@ use super::Runtime; use crate::exec::Ext; -use sp_sandbox::Value; use pwasm_utils::parity_wasm::elements::{FunctionType, ValueType}; +use sp_sandbox::Value; #[macro_use] pub mod macros; @@ -67,11 +67,10 @@ impl ConvertibleToWasm for u64 { } } -pub type HostFunc = - fn( - &mut Runtime, - &[sp_sandbox::Value] - ) -> Result; +pub type HostFunc = fn( + &mut Runtime, + &[sp_sandbox::Value], +) -> Result; pub trait FunctionImplProvider { fn impls)>(f: &mut F); diff --git a/substrate/frame/contracts/src/wasm/mod.rs b/substrate/frame/contracts/src/wasm/mod.rs index ef45f35d0dae9569272bb449c02151e09c01acff..8ef11c8f4c87b41cef3b0707fd8b886c0305b17b 100644 --- a/substrate/frame/contracts/src/wasm/mod.rs +++ b/substrate/frame/contracts/src/wasm/mod.rs @@ -24,19 +24,19 @@ mod code_cache; mod prepare; mod runtime; +#[cfg(feature = "runtime-benchmarks")] +pub use 
self::code_cache::reinstrument; +pub use self::runtime::{ReturnCode, Runtime, RuntimeCosts}; use crate::{ - CodeHash, Schedule, Config, - wasm::env_def::FunctionImplProvider, - exec::{Ext, Executable, ExportedFunction, ExecResult}, + exec::{ExecResult, Executable, ExportedFunction, Ext}, gas::GasMeter, + wasm::env_def::FunctionImplProvider, + CodeHash, Config, Schedule, }; -use sp_std::prelude::*; -use sp_core::crypto::UncheckedFrom; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::dispatch::DispatchError; -pub use self::runtime::{ReturnCode, Runtime, RuntimeCosts}; -#[cfg(feature = "runtime-benchmarks")] -pub use self::code_cache::reinstrument; +use sp_core::crypto::UncheckedFrom; +use sp_std::prelude::*; #[cfg(test)] pub use tests::MockExt; @@ -108,12 +108,12 @@ impl ExportedFunction { impl PrefabWasmModule where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { /// Create the module by checking and instrumenting `original_code`. pub fn from_code( original_code: Vec, - schedule: &Schedule + schedule: &Schedule, ) -> Result { prepare::prepare_contract(original_code, schedule).map_err(Into::into) } @@ -127,7 +127,7 @@ where #[cfg(feature = "runtime-benchmarks")] pub fn store_code_unchecked( original_code: Vec, - schedule: &Schedule + schedule: &Schedule, ) -> Result<(), DispatchError> { let executable = prepare::benchmarking::prepare_contract(original_code, schedule) .map_err::(Into::into)?; @@ -150,7 +150,7 @@ where impl Executable for PrefabWasmModule where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { fn from_storage( code_hash: CodeHash, @@ -168,15 +168,14 @@ where code_cache::store_decremented(self); } - fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError> - { + fn add_user(code_hash: CodeHash, gas_meter: &mut GasMeter) -> Result<(), DispatchError> { code_cache::increment_refcount::(code_hash, gas_meter) } - fn remove_user(code_hash: CodeHash, gas_meter: &mut GasMeter) - -> Result<(), DispatchError> - { + fn remove_user( + code_hash: CodeHash, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { code_cache::decrement_refcount::(code_hash, gas_meter) } @@ -187,16 +186,15 @@ where input_data: Vec, ) -> ExecResult { let memory = - sp_sandbox::Memory::new(self.initial, Some(self.maximum)) - .unwrap_or_else(|_| { + sp_sandbox::Memory::new(self.initial, Some(self.maximum)).unwrap_or_else(|_| { // unlike `.expect`, explicit panic preserves the source location. // Needed as we can't use `RUST_BACKTRACE` in here. - panic!( - "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; + panic!( + "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; thus Memory::new must not fail; qed" - ) - }); + ) + }); let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); imports.add_memory(self::prepare::IMPORT_MODULE_MEMORY, "memory", memory.clone()); @@ -204,11 +202,7 @@ where imports.add_host_func(module, name, func_ptr); }); - let mut runtime = Runtime::new( - ext, - input_data, - memory, - ); + let mut runtime = Runtime::new(ext, input_data, memory); // We store before executing so that the code hash is available in the constructor. 
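// Aside: the sandbox setup above registers every host function through
// `imports.add_host_func(module, name, func_ptr)`, and `Env::can_satisfy` (in the
// env_def tests) answers whether an import with an exact signature exists. A standalone
// model of such an import table; the types are simplified stand-ins:
#[derive(PartialEq, Debug)]
enum ValueType {
    I32,
}

#[derive(PartialEq, Debug)]
struct FunctionType {
    params: Vec<ValueType>,
    results: Vec<ValueType>,
}

struct Env {
    // (module, name) -> signature, as populated by something like `add_host_func`.
    imports: Vec<((&'static str, &'static str), FunctionType)>,
}

impl Env {
    fn can_satisfy(&self, module: &str, name: &str, ty: &FunctionType) -> bool {
        // An import is satisfied only if it exists AND the signature matches exactly.
        self.imports.iter().any(|((m, n), have)| *m == module && *n == name && have == ty)
    }
}

fn main() {
    let env = Env {
        imports: vec![(
            ("seal0", "seal_gas"),
            FunctionType { params: vec![ValueType::I32], results: vec![] },
        )],
    };
    let ty = FunctionType { params: vec![ValueType::I32], results: vec![] };
    assert!(env.can_satisfy("seal0", "seal_gas", &ty));
    assert!(!env.can_satisfy("seal0", "not_exists", &FunctionType { params: vec![], results: vec![] }));
}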
let code = self.code.clone(); @@ -245,31 +239,27 @@ where mod tests { use super::*; use crate::{ - CodeHash, BalanceOf, Error, Pallet as Contracts, exec::{ - Ext, StorageKey, AccountIdOf, Executable, SeedOf, BlockNumberOf, - RentParams, ExecError, ErrorOrigin, + AccountIdOf, BlockNumberOf, ErrorOrigin, ExecError, Executable, Ext, RentParams, + SeedOf, StorageKey, }, gas::GasMeter, rent::RentStatus, - tests::{Test, Call, ALICE, BOB}, + tests::{Call, Test, ALICE, BOB}, + BalanceOf, CodeHash, Error, Pallet as Contracts, }; - use std::{ - borrow::BorrowMut, - cell::RefCell, - collections::HashMap, - }; - use sp_core::{Bytes, H256}; - use hex_literal::hex; - use sp_runtime::DispatchError; + use assert_matches::assert_matches; use frame_support::{ assert_ok, dispatch::{DispatchResult, DispatchResultWithPostInfo}, weights::Weight, }; - use assert_matches::assert_matches; + use hex_literal::hex; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; + use sp_core::{Bytes, H256}; + use sp_runtime::DispatchError; + use std::{borrow::BorrowMut, cell::RefCell, collections::HashMap}; #[derive(Debug, PartialEq, Eq)] struct RestoreEntry { @@ -360,12 +350,7 @@ mod tests { data: Vec, allows_reentry: bool, ) -> Result { - self.calls.push(CallEntry { - to, - value, - data, - allows_reentry, - }); + self.calls.push(CallEntry { to, value, data, allows_reentry }); Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: call_return_data() }) } fn instantiate( @@ -385,30 +370,15 @@ mod tests { }); Ok(( Contracts::::contract_address(&ALICE, &code_hash, salt), - ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes(Vec::new()), - }, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, )) } - fn transfer( - &mut self, - to: &AccountIdOf, - value: u64, - ) -> Result<(), DispatchError> { - self.transfers.push(TransferEntry { - to: to.clone(), - value, - }); + fn transfer(&mut self, to: &AccountIdOf, value: u64) -> Result<(), DispatchError> { + self.transfers.push(TransferEntry { to: to.clone(), value }); Ok(()) } - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError> { - self.terminations.push(TerminationEntry { - beneficiary: beneficiary.clone(), - }); + fn terminate(&mut self, beneficiary: &AccountIdOf) -> Result<(), DispatchError> { + self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone() }); Ok(()) } fn restore_to( @@ -418,12 +388,7 @@ mod tests { rent_allowance: u64, delta: Vec, ) -> Result<(), DispatchError> { - self.restores.push(RestoreEntry { - dest, - code_hash, - rent_allowance, - delta, - }); + self.restores.push(RestoreEntry { dest, code_hash, rent_allowance, delta }); Ok(()) } fn get_storage(&mut self, key: &StorageKey) -> Option> { @@ -466,8 +431,12 @@ mod tests { fn rent_allowance(&mut self) -> u64 { self.rent_allowance } - fn block_number(&self) -> u64 { 121 } - fn max_value_size(&self) -> u32 { 16_384 } + fn block_number(&self) -> u64 { + 121 + } + fn max_value_size(&self) -> u32 { + 16_384 + } fn get_weight_price(&self, weight: Weight) -> BalanceOf { BalanceOf::::from(1312_u32).saturating_mul(weight.into()) } @@ -493,16 +462,11 @@ mod tests { } } - fn execute>( - wat: &str, - input_data: Vec, - mut ext: E, - ) -> ExecResult - { + fn execute>(wat: &str, input_data: Vec, mut ext: E) -> ExecResult { let wasm = wat::parse_str(wat).unwrap(); let schedule = crate::Schedule::default(); - let executable = PrefabWasmModule::<::T>::from_code(wasm, &schedule) - .unwrap(); + let 
executable = + PrefabWasmModule::<::T>::from_code(wasm, &schedule).unwrap(); executable.execute(ext.borrow_mut(), &ExportedFunction::Call, input_data) } @@ -543,19 +507,9 @@ mod tests { #[test] fn contract_transfer() { let mut mock_ext = MockExt::default(); - assert_ok!(execute( - CODE_TRANSFER, - vec![], - &mut mock_ext, - )); + assert_ok!(execute(CODE_TRANSFER, vec![], &mut mock_ext,)); - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: ALICE, - value: 153, - }] - ); + assert_eq!(&mock_ext.transfers, &[TransferEntry { to: ALICE, value: 153 }]); } const CODE_CALL: &str = r#" @@ -607,20 +561,11 @@ mod tests { #[test] fn contract_call() { let mut mock_ext = MockExt::default(); - assert_ok!(execute( - CODE_CALL, - vec![], - &mut mock_ext, - )); + assert_ok!(execute(CODE_CALL, vec![], &mut mock_ext,)); assert_eq!( &mock_ext.calls, - &[CallEntry { - to: ALICE, - value: 6, - data: vec![1, 2, 3, 4], - allows_reentry: true, - }] + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], allows_reentry: true }] ); } @@ -675,12 +620,7 @@ mod tests { assert_eq!( &mock_ext.calls, - &[CallEntry { - to: ALICE, - value: 0x2a, - data: input, - allows_reentry: false, - }] + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: false }] ); } @@ -736,12 +676,7 @@ mod tests { assert_eq!(result.data.0, input); assert_eq!( &mock_ext.calls, - &[CallEntry { - to: ALICE, - value: 0x2a, - data: input, - allows_reentry: true, - }] + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: true }] ); } @@ -789,12 +724,7 @@ mod tests { assert_eq!(result.data, call_return_data()); assert_eq!( &mock_ext.calls, - &[CallEntry { - to: ALICE, - value: 0x2a, - data: input, - allows_reentry: false, - }] + &[CallEntry { to: ALICE, value: 0x2a, data: input, allows_reentry: false }] ); } @@ -857,11 +787,7 @@ mod tests { #[test] fn contract_instantiate() { let mut mock_ext = MockExt::default(); - assert_ok!(execute( - CODE_INSTANTIATE, - vec![], - &mut mock_ext, - )); + assert_ok!(execute(CODE_INSTANTIATE, vec![], &mut mock_ext,)); assert_matches!( &mock_ext.instantiates[..], @@ -905,18 +831,9 @@ mod tests { #[test] fn contract_terminate() { let mut mock_ext = MockExt::default(); - execute( - CODE_TERMINATE, - vec![], - &mut mock_ext, - ).unwrap(); + execute(CODE_TERMINATE, vec![], &mut mock_ext).unwrap(); - assert_eq!( - &mock_ext.terminations, - &[TerminationEntry { - beneficiary: ALICE, - }] - ); + assert_eq!(&mock_ext.terminations, &[TerminationEntry { beneficiary: ALICE }]); } const CODE_TRANSFER_LIMITED_GAS: &str = r#" @@ -967,20 +884,11 @@ mod tests { #[test] fn contract_call_limited_gas() { let mut mock_ext = MockExt::default(); - assert_ok!(execute( - &CODE_TRANSFER_LIMITED_GAS, - vec![], - &mut mock_ext, - )); + assert_ok!(execute(&CODE_TRANSFER_LIMITED_GAS, vec![], &mut mock_ext,)); assert_eq!( &mock_ext.calls, - &[CallEntry { - to: ALICE, - value: 6, - data: vec![1, 2, 3, 4], - allows_reentry: true, - }] + &[CallEntry { to: ALICE, value: 6, data: vec![1, 2, 3, 4], allows_reentry: true }] ); } @@ -1051,20 +959,14 @@ mod tests { #[test] fn get_storage_puts_data_into_buf() { let mut mock_ext = MockExt::default(); - mock_ext - .storage - .insert([0x11; 32], [0x22; 32].to_vec()); + mock_ext.storage.insert([0x11; 32], [0x22; 32].to_vec()); - let output = execute( - CODE_GET_STORAGE, - vec![], - mock_ext, - ).unwrap(); - - assert_eq!(output, ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes([0x22; 32].to_vec()) - }); + let output = execute(CODE_GET_STORAGE, vec![], 
mock_ext).unwrap(); + + assert_eq!( + output, + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes([0x22; 32].to_vec()) } + ); } /// calls `seal_caller` and compares the result with the constant 42. @@ -1112,11 +1014,7 @@ mod tests { #[test] fn caller() { - assert_ok!(execute( - CODE_CALLER, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_CALLER, vec![], MockExt::default(),)); } /// calls `seal_address` and compares the result with the constant 69. @@ -1164,11 +1062,7 @@ mod tests { #[test] fn address() { - assert_ok!(execute( - CODE_ADDRESS, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_ADDRESS, vec![], MockExt::default(),)); } const CODE_BALANCE: &str = r#" @@ -1214,11 +1108,7 @@ mod tests { #[test] fn balance() { - assert_ok!(execute( - CODE_BALANCE, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_BALANCE, vec![], MockExt::default(),)); } const CODE_GAS_PRICE: &str = r#" @@ -1264,11 +1154,7 @@ mod tests { #[test] fn gas_price() { - assert_ok!(execute( - CODE_GAS_PRICE, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_GAS_PRICE, vec![], MockExt::default(),)); } const CODE_GAS_LEFT: &str = r#" @@ -1315,11 +1201,7 @@ mod tests { let mut ext = MockExt::default(); let gas_limit = ext.gas_meter.gas_left(); - let output = execute( - CODE_GAS_LEFT, - vec![], - &mut ext, - ).unwrap(); + let output = execute(CODE_GAS_LEFT, vec![], &mut ext).unwrap(); let gas_left = Weight::decode(&mut &*output.data).unwrap(); let actual_left = ext.gas_meter.gas_left(); @@ -1370,11 +1252,7 @@ mod tests { #[test] fn value_transferred() { - assert_ok!(execute( - CODE_VALUE_TRANSFERRED, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_VALUE_TRANSFERRED, vec![], MockExt::default(),)); } const CODE_RETURN_FROM_START_FN: &str = r#" @@ -1403,18 +1281,11 @@ mod tests { #[test] fn return_from_start_fn() { - let output = execute( - CODE_RETURN_FROM_START_FN, - vec![], - MockExt::default(), - ).unwrap(); + let output = execute(CODE_RETURN_FROM_START_FN, vec![], MockExt::default()).unwrap(); assert_eq!( output, - ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes(vec![1, 2, 3, 4]) - } + ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) } ); } @@ -1461,11 +1332,7 @@ mod tests { #[test] fn now() { - assert_ok!(execute( - CODE_TIMESTAMP_NOW, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_TIMESTAMP_NOW, vec![], MockExt::default(),)); } const CODE_MINIMUM_BALANCE: &str = r#" @@ -1510,11 +1377,7 @@ mod tests { #[test] fn minimum_balance() { - assert_ok!(execute( - CODE_MINIMUM_BALANCE, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_MINIMUM_BALANCE, vec![], MockExt::default(),)); } const CODE_TOMBSTONE_DEPOSIT: &str = r#" @@ -1559,11 +1422,7 @@ mod tests { #[test] fn tombstone_deposit() { - assert_ok!(execute( - CODE_TOMBSTONE_DEPOSIT, - vec![], - MockExt::default(), - )); + assert_ok!(execute(CODE_TOMBSTONE_DEPOSIT, vec![], MockExt::default(),)); } const CODE_RANDOM: &str = r#" @@ -1622,11 +1481,7 @@ mod tests { #[test] fn random() { - let output = execute( - CODE_RANDOM, - vec![], - MockExt::default(), - ).unwrap(); + let output = execute(CODE_RANDOM, vec![], MockExt::default()).unwrap(); // The mock ext just returns the same data that was passed as the subject. 
assert_eq!( @@ -1697,26 +1552,24 @@ mod tests { #[test] fn random_v1() { - let output = execute( - CODE_RANDOM_V1, - vec![], - MockExt::default(), - ).unwrap(); + let output = execute(CODE_RANDOM_V1, vec![], MockExt::default()).unwrap(); // The mock ext just returns the same data that was passed as the subject. assert_eq!( output, ExecReturnValue { flags: ReturnFlags::empty(), - data: Bytes(( + data: Bytes( + ( hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"), 42u64, - ).encode()), + ) + .encode() + ), }, ); } - const CODE_DEPOSIT_EVENT: &str = r#" (module (import "seal0" "seal_deposit_event" (func $seal_deposit_event (param i32 i32 i32 i32))) @@ -1743,16 +1596,15 @@ mod tests { #[test] fn deposit_event() { let mut mock_ext = MockExt::default(); - assert_ok!(execute( - CODE_DEPOSIT_EVENT, - vec![], - &mut mock_ext, - )); + assert_ok!(execute(CODE_DEPOSIT_EVENT, vec![], &mut mock_ext,)); - assert_eq!(mock_ext.events, vec![ - (vec![H256::repeat_byte(0x33)], - vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]) - ]); + assert_eq!( + mock_ext.events, + vec![( + vec![H256::repeat_byte(0x33)], + vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00] + )] + ); assert!(mock_ext.gas_meter.gas_left() > 0); } @@ -1788,11 +1640,7 @@ mod tests { #[test] fn deposit_event_max_topics() { assert_eq!( - execute( - CODE_DEPOSIT_EVENT_MAX_TOPICS, - vec![], - MockExt::default(), - ), + execute(CODE_DEPOSIT_EVENT_MAX_TOPICS, vec![], MockExt::default(),), Err(ExecError { error: Error::::TooManyTopics.into(), origin: ErrorOrigin::Caller, @@ -1830,11 +1678,7 @@ mod tests { #[test] fn deposit_event_duplicates() { assert_eq!( - execute( - CODE_DEPOSIT_EVENT_DUPLICATES, - vec![], - MockExt::default(), - ), + execute(CODE_DEPOSIT_EVENT_DUPLICATES, vec![], MockExt::default(),), Err(ExecError { error: Error::::DuplicateTopics.into(), origin: ErrorOrigin::Caller, @@ -1887,11 +1731,7 @@ mod tests { #[test] fn block_number() { - let _ = execute( - CODE_BLOCK_NUMBER, - vec![], - MockExt::default(), - ).unwrap(); + let _ = execute(CODE_BLOCK_NUMBER, vec![], MockExt::default()).unwrap(); } const CODE_RETURN_WITH_DATA: &str = r#" @@ -1932,27 +1772,32 @@ mod tests { CODE_RETURN_WITH_DATA, hex!("00000000445566778899").to_vec(), MockExt::default(), - ).unwrap(); + ) + .unwrap(); - assert_eq!(output, ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes(hex!("445566778899").to_vec()), - }); + assert_eq!( + output, + ExecReturnValue { + flags: ReturnFlags::empty(), + data: Bytes(hex!("445566778899").to_vec()), + } + ); assert!(output.is_success()); } #[test] fn return_with_revert_status() { - let output = execute( - CODE_RETURN_WITH_DATA, - hex!("010000005566778899").to_vec(), - MockExt::default(), - ).unwrap(); + let output = + execute(CODE_RETURN_WITH_DATA, hex!("010000005566778899").to_vec(), MockExt::default()) + .unwrap(); - assert_eq!(output, ExecReturnValue { - flags: ReturnFlags::REVERT, - data: Bytes(hex!("5566778899").to_vec()), - }); + assert_eq!( + output, + ExecReturnValue { + flags: ReturnFlags::REVERT, + data: Bytes(hex!("5566778899").to_vec()), + } + ); assert!(!output.is_success()); } @@ -1975,11 +1820,7 @@ mod tests { #[test] fn contract_out_of_bounds_access() { let mut mock_ext = MockExt::default(); - let result = execute( - CODE_OUT_OF_BOUNDS_ACCESS, - vec![], - &mut mock_ext, - ); + let result = execute(CODE_OUT_OF_BOUNDS_ACCESS, vec![], &mut mock_ext); assert_eq!( result, @@ -2009,11 +1850,7 @@ mod tests { #[test] fn 
contract_decode_length_ignored() { let mut mock_ext = MockExt::default(); - let result = execute( - CODE_DECODE_FAILURE, - vec![], - &mut mock_ext, - ); + let result = execute(CODE_DECODE_FAILURE, vec![], &mut mock_ext); // AccountID implements `MaxEncodeLen` and therefore the supplied length is // no longer needed nor used to determine how much is read from contract memory. assert_ok!(result); @@ -2051,17 +1888,11 @@ mod tests { (func (export "deploy")) ) "#; - let output = execute( - CODE_RENT_PARAMS, - vec![], - MockExt::default(), - ).unwrap(); + let output = execute(CODE_RENT_PARAMS, vec![], MockExt::default()).unwrap(); let rent_params = Bytes(>::default().encode()); assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: rent_params }); } - - #[test] #[cfg(feature = "unstable-interface")] fn rent_status_works() { @@ -2095,11 +1926,7 @@ mod tests { (func (export "deploy")) ) "#; - let output = execute( - CODE_RENT_STATUS, - vec![], - MockExt::default(), - ).unwrap(); + let output = execute(CODE_RENT_STATUS, vec![], MockExt::default()).unwrap(); let rent_status = Bytes(>::default().encode()); assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: rent_status }); } @@ -2126,11 +1953,7 @@ mod tests { ) "#; let mut ext = MockExt::default(); - execute( - CODE_DEBUG_MESSAGE, - vec![], - &mut ext, - ).unwrap(); + execute(CODE_DEBUG_MESSAGE, vec![], &mut ext).unwrap(); assert_eq!(std::str::from_utf8(&ext.debug_buffer).unwrap(), "Hello World!"); } @@ -2157,11 +1980,7 @@ mod tests { ) "#; let mut ext = MockExt::default(); - let result = execute( - CODE_DEBUG_MESSAGE_FAIL, - vec![], - &mut ext, - ); + let result = execute(CODE_DEBUG_MESSAGE_FAIL, vec![], &mut ext); assert_eq!( result, Err(ExecError { @@ -2213,15 +2032,8 @@ mod tests { use std::convert::TryInto; let call = Call::System(frame_system::Call::remark(b"Hello World".to_vec())); let mut ext = MockExt::default(); - let result = execute( - CODE_CALL_RUNTIME, - call.encode(), - &mut ext, - ).unwrap(); - assert_eq!( - *ext.runtime_calls.borrow(), - vec![call], - ); + let result = execute(CODE_CALL_RUNTIME, call.encode(), &mut ext).unwrap(); + assert_eq!(*ext.runtime_calls.borrow(), vec![call],); // 0 = ReturnCode::Success assert_eq!(u32::from_le_bytes(result.data.0.try_into().unwrap()), 0); } @@ -2230,11 +2042,7 @@ mod tests { #[cfg(feature = "unstable-interface")] fn call_runtime_panics_on_invalid_call() { let mut ext = MockExt::default(); - let result = execute( - CODE_CALL_RUNTIME, - vec![0x42], - &mut ext, - ); + let result = execute(CODE_CALL_RUNTIME, vec![0x42], &mut ext); assert_eq!( result, Err(ExecError { @@ -2242,9 +2050,6 @@ mod tests { origin: ErrorOrigin::Caller, }) ); - assert_eq!( - *ext.runtime_calls.borrow(), - vec![], - ); + assert_eq!(*ext.runtime_calls.borrow(), vec![],); } } diff --git a/substrate/frame/contracts/src/wasm/prepare.rs b/substrate/frame/contracts/src/wasm/prepare.rs index 2b52d9438904d345c36ab078236970efebcee943..280dedc39e66adb81ac61647ef9ce3704ba1f300 100644 --- a/substrate/frame/contracts/src/wasm/prepare.rs +++ b/substrate/frame/contracts/src/wasm/prepare.rs @@ -20,11 +20,11 @@ //! from a module. 
use crate::{ - Schedule, Config, chain_extension::ChainExtension, - wasm::{PrefabWasmModule, env_def::ImportSatisfyCheck}, + wasm::{env_def::ImportSatisfyCheck, PrefabWasmModule}, + Config, Schedule, }; -use pwasm_utils::parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueType}; +use pwasm_utils::parity_wasm::elements::{self, External, Internal, MemoryType, Type, ValueType}; use sp_runtime::traits::Hash; use sp_std::prelude::*; @@ -43,10 +43,7 @@ impl<'a, T: Config> ContractModule<'a, T> { /// /// Returns `Err` if the `original_code` couldn't be decoded or /// if it contains an invalid module. - fn new( - original_code: &[u8], - schedule: &'a Schedule, - ) -> Result { + fn new(original_code: &[u8], schedule: &'a Schedule) -> Result { use wasmi_validation::{validate_module, PlainValidator}; let module = @@ -57,10 +54,7 @@ impl<'a, T: Config> ContractModule<'a, T> { // Return a `ContractModule` instance with // __valid__ module. - Ok(ContractModule { - module, - schedule, - }) + Ok(ContractModule { module, schedule }) } /// Ensures that module doesn't declare internal memories. @@ -69,11 +63,8 @@ impl<'a, T: Config> ContractModule<'a, T> { /// Memory section contains declarations of internal linear memories, so if we find one /// we reject such a module. fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { - if self.module - .memory_section() - .map_or(false, |ms| ms.entries().len() > 0) - { - return Err("module declares internal memory"); + if self.module.memory_section().map_or(false, |ms| ms.entries().len() > 0) { + return Err("module declares internal memory") } Ok(()) } @@ -84,7 +75,7 @@ impl<'a, T: Config> ContractModule<'a, T> { // In Wasm MVP spec, there may be at most one table declared. Double check this // explicitly just in case the Wasm version changes. 
if table_section.entries().len() > 1 { - return Err("multiple tables declared"); + return Err("multiple tables declared") } if let Some(table_type) = table_section.entries().first() { // Check the table's initial size as there is no instruction or environment function @@ -102,7 +93,7 @@ impl<'a, T: Config> ContractModule<'a, T> { let code_section = if let Some(type_section) = self.module.code_section() { type_section } else { - return Ok(()); + return Ok(()) }; for instr in code_section.bodies().iter().flat_map(|body| body.code().elements()) { use self::elements::Instruction::BrTable; @@ -131,7 +122,7 @@ impl<'a, T: Config> ContractModule<'a, T> { match global.global_type().content_type() { ValueType::F32 | ValueType::F64 => return Err("use of floating point type in globals is forbidden"), - _ => {} + _ => {}, } } } @@ -142,7 +133,7 @@ impl<'a, T: Config> ContractModule<'a, T> { match local.value_type() { ValueType::F32 | ValueType::F64 => return Err("use of floating point type in locals is forbidden"), - _ => {} + _ => {}, } } } @@ -156,11 +147,13 @@ impl<'a, T: Config> ContractModule<'a, T> { for value_type in func_type.params().iter().chain(return_type) { match value_type { ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in function types is forbidden"), - _ => {} + return Err( + "use of floating point type in function types is forbidden", + ), + _ => {}, } } - } + }, } } } @@ -173,12 +166,12 @@ impl<'a, T: Config> ContractModule<'a, T> { let type_section = if let Some(type_section) = self.module.type_section() { type_section } else { - return Ok(()); + return Ok(()) }; for Type::Function(func) in type_section.types() { if func.params().len() > limit as usize { - return Err("Use of a function type with too many parameters."); + return Err("Use of a function type with too many parameters.") } } @@ -187,26 +180,18 @@ impl<'a, T: Config> ContractModule<'a, T> { fn inject_gas_metering(self) -> Result { let gas_rules = self.schedule.rules(&self.module); - let contract_module = pwasm_utils::inject_gas_counter( - self.module, - &gas_rules, - "seal0", - ).map_err(|_| "gas instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) + let contract_module = pwasm_utils::inject_gas_counter(self.module, &gas_rules, "seal0") + .map_err(|_| "gas instrumentation failed")?; + Ok(ContractModule { module: contract_module, schedule: self.schedule }) } fn inject_stack_height_metering(self) -> Result { - let contract_module = - pwasm_utils::stack_height - ::inject_limiter(self.module, self.schedule.limits.stack_height) - .map_err(|_| "stack height instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) + let contract_module = pwasm_utils::stack_height::inject_limiter( + self.module, + self.schedule.limits.stack_height, + ) + .map_err(|_| "stack height instrumentation failed")?; + Ok(ContractModule { module: contract_module, schedule: self.schedule }) } /// Check that the module has required exported functions. 
For now @@ -223,14 +208,8 @@ impl<'a, T: Config> ContractModule<'a, T> { let module = &self.module; let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let export_entries = module - .export_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - let func_entries = module - .function_section() - .map(|fs| fs.entries()) - .unwrap_or(&[]); + let export_entries = module.export_section().map(|is| is.entries()).unwrap_or(&[]); + let func_entries = module.function_section().map(|fs| fs.entries()).unwrap_or(&[]); // Function index space consists of imported functions followed by // declared functions. Calculate the total number of imported functions so @@ -240,11 +219,9 @@ impl<'a, T: Config> ContractModule<'a, T> { .map(|is| is.entries()) .unwrap_or(&[]) .iter() - .filter(|entry| { - match *entry.external() { - External::Function(_) => true, - _ => false, - } + .filter(|entry| match *entry.external() { + External::Function(_) => true, + _ => false, }) .count(); @@ -267,32 +244,32 @@ impl<'a, T: Config> ContractModule<'a, T> { Some(fn_idx) => fn_idx, None => { // Underflow here means fn_idx points to an imported function which we don't allow! - return Err("entry point points to an imported function"); - } + return Err("entry point points to an imported function") + }, }; // Then check the signature. // Both "call" and "deploy" have a () -> () function type. // We still support () -> (i32) for backwards compatibility. - let func_ty_idx = func_entries.get(fn_idx as usize) + let func_ty_idx = func_entries + .get(fn_idx as usize) .ok_or_else(|| "export refers to non-existent function")? .type_ref(); let Type::Function(ref func_ty) = types .get(func_ty_idx as usize) .ok_or_else(|| "function has a non-existent type")?; - if !( - func_ty.params().is_empty() && - (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32]) - ) { - return Err("entry point has wrong signature"); + if !(func_ty.params().is_empty() && + (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32])) + { + return Err("entry point has wrong signature") } } if !deploy_found { - return Err("deploy function isn't exported"); + return Err("deploy function isn't exported") } if !call_found { - return Err("call function isn't exported"); + return Err("call function isn't exported") } Ok(()) @@ -306,16 +283,14 @@ impl<'a, T: Config> ContractModule<'a, T> { /// their signatures. 
/// - if there is a memory import, returns its descriptor /// `import_fn_banlist`: list of function names that are disallowed to be imported - fn scan_imports<C: ImportSatisfyCheck>(&self, import_fn_banlist: &[&[u8]]) - -> Result<Option<&MemoryType>, &'static str> - { + fn scan_imports<C: ImportSatisfyCheck>( + &self, + import_fn_banlist: &[&[u8]], + ) -> Result<Option<&MemoryType>, &'static str> { let module = &self.module; let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let import_entries = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]); + let import_entries = module.import_section().map(|is| is.entries()).unwrap_or(&[]); let mut imported_mem_type = None; @@ -326,7 +301,7 @@ impl<'a, T: Config> ContractModule<'a, T> { &External::Function(ref type_idx) => type_idx, &External::Memory(ref memory_type) => { if import.module() != IMPORT_MODULE_MEMORY { - return Err("Invalid module for imported memory"); + return Err("Invalid module for imported memory") } if import.field() != "memory" { return Err("Memory import must have the field name 'memory'") @@ -335,8 +310,8 @@ impl<'a, T: Config> ContractModule<'a, T> { return Err("Multiple memory imports defined") } imported_mem_type = Some(memory_type); - continue; - } + continue + }, }; let Type::Function(ref func_ty) = types @@ -346,48 +321,44 @@ impl<'a, T: Config> ContractModule<'a, T> { if !T::ChainExtension::enabled() && import.field().as_bytes() == b"seal_call_chain_extension" { - return Err("module uses chain extensions but chain extensions are disabled"); + return Err("module uses chain extensions but chain extensions are disabled") } - if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) - || !C::can_satisfy( - import.module().as_bytes(), import.field().as_bytes(), func_ty, - ) + if import_fn_banlist.iter().any(|f| import.field().as_bytes() == *f) || + !C::can_satisfy(import.module().as_bytes(), import.field().as_bytes(), func_ty) { - return Err("module imports a non-existent function"); + return Err("module imports a non-existent function") } } Ok(imported_mem_type) } fn into_wasm_code(self) -> Result<Vec<u8>, &'static str> { - elements::serialize(self.module) - .map_err(|_| "error serializing instrumented module") + elements::serialize(self.module).map_err(|_| "error serializing instrumented module") } } -fn get_memory_limits<T: Config>(module: Option<&MemoryType>, schedule: &Schedule<T>) - -> Result<(u32, u32), &'static str> -{ +fn get_memory_limits<T: Config>( + module: Option<&MemoryType>, + schedule: &Schedule<T>, +) -> Result<(u32, u32), &'static str> { if let Some(memory_type) = module { // Inspect the module to extract the initial and maximum page count. let limits = memory_type.limits(); match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => { + (initial, Some(maximum)) if initial > maximum => return Err( "Requested initial number of pages should not exceed the requested maximum", - ); - } - (_, Some(maximum)) if maximum > schedule.limits.memory_pages => { - return Err("Maximum number of pages should not exceed the configured maximum."); - } + ), + (_, Some(maximum)) if maximum > schedule.limits.memory_pages => + return Err("Maximum number of pages should not exceed the configured maximum."), (initial, Some(maximum)) => Ok((initial, maximum)), (_, None) => { // Maximum number of pages should be always declared. // This isn't a hard requirement and can be treated as a maximum set // to configured maximum. 
- return Err("Maximum number of pages should be always declared."); - } + return Err("Maximum number of pages should be always declared.") + }, } } else { // If none memory imported then just crate an empty placeholder. @@ -411,10 +382,8 @@ fn check_and_instrument( // We disallow importing `gas` function here since it is treated as implementation detail. let disallowed_imports = [b"gas".as_ref()]; - let memory_limits = get_memory_limits( - contract_module.scan_imports::(&disallowed_imports)?, - schedule - )?; + let memory_limits = + get_memory_limits(contract_module.scan_imports::(&disallowed_imports)?, schedule)?; let code = contract_module .inject_gas_metering()? @@ -428,10 +397,8 @@ fn do_preparation( original_code: Vec, schedule: &Schedule, ) -> Result, &'static str> { - let (code, (initial, maximum)) = check_and_instrument::( - original_code.as_ref(), - schedule, - )?; + let (code, (initial, maximum)) = + check_and_instrument::(original_code.as_ref(), schedule)?; Ok(PrefabWasmModule { instruction_weights_version: schedule.instruction_weights.version, initial, @@ -483,8 +450,7 @@ pub fn reinstrument_contract( /// in production code. #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking { - use super::*; - use super::elements::FunctionType; + use super::{elements::FunctionType, *}; impl ImportSatisfyCheck for () { fn can_satisfy(_module: &[u8], _name: &[u8], _func_type: &FunctionType) -> bool { @@ -493,9 +459,10 @@ pub mod benchmarking { } /// Prepare function that neither checks nor instruments the passed in code. - pub fn prepare_contract(original_code: Vec, schedule: &Schedule) - -> Result, &'static str> - { + pub fn prepare_contract( + original_code: Vec, + schedule: &Schedule, + ) -> Result, &'static str> { let contract_module = ContractModule::new(&original_code, schedule)?; let memory_limits = get_memory_limits(contract_module.scan_imports::<()>(&[])?, schedule)?; Ok(PrefabWasmModule { @@ -566,7 +533,8 @@ mod tests { }; } - prepare_test!(no_floats, + prepare_test!( + no_floats, r#" (module (func (export "call") @@ -585,7 +553,8 @@ mod tests { mod functions { use super::*; - prepare_test!(param_number_valid, + prepare_test!( + param_number_valid, r#" (module (func (export "call")) @@ -596,7 +565,8 @@ mod tests { Ok(_) ); - prepare_test!(param_number_invalid, + prepare_test!( + param_number_invalid, r#" (module (func (export "call")) @@ -612,7 +582,8 @@ mod tests { mod globals { use super::*; - prepare_test!(global_number_valid, + prepare_test!( + global_number_valid, r#" (module (global i64 (i64.const 0)) @@ -625,7 +596,8 @@ mod tests { Ok(_) ); - prepare_test!(global_number_too_high, + prepare_test!( + global_number_too_high, r#" (module (global i64 (i64.const 0)) @@ -643,7 +615,8 @@ mod tests { mod memories { use super::*; - prepare_test!(memory_with_one_page, + prepare_test!( + memory_with_one_page, r#" (module (import "env" "memory" (memory 1 1)) @@ -655,7 +628,8 @@ mod tests { Ok(_) ); - prepare_test!(internal_memory_declaration, + prepare_test!( + internal_memory_declaration, r#" (module (memory 1 1) @@ -667,7 +641,8 @@ mod tests { Err("module declares internal memory") ); - prepare_test!(no_memory_import, + prepare_test!( + no_memory_import, r#" (module ;; no memory imported @@ -678,7 +653,8 @@ mod tests { Ok(_) ); - prepare_test!(initial_exceeds_maximum, + prepare_test!( + initial_exceeds_maximum, r#" (module (import "env" "memory" (memory 16 1)) @@ -690,7 +666,8 @@ mod tests { Err("Module is not valid") ); - prepare_test!(no_maximum, + prepare_test!( + no_maximum, 
r#" (module (import "env" "memory" (memory 1)) @@ -702,7 +679,8 @@ mod tests { Err("Maximum number of pages should be always declared.") ); - prepare_test!(requested_maximum_valid, + prepare_test!( + requested_maximum_valid, r#" (module (import "env" "memory" (memory 1 16)) @@ -714,7 +692,8 @@ mod tests { Ok(_) ); - prepare_test!(requested_maximum_exceeds_configured_maximum, + prepare_test!( + requested_maximum_exceeds_configured_maximum, r#" (module (import "env" "memory" (memory 1 17)) @@ -726,7 +705,8 @@ mod tests { Err("Maximum number of pages should not exceed the configured maximum.") ); - prepare_test!(field_name_not_memory, + prepare_test!( + field_name_not_memory, r#" (module (import "env" "forgetit" (memory 1 1)) @@ -738,7 +718,8 @@ mod tests { Err("Memory import must have the field name 'memory'") ); - prepare_test!(multiple_memory_imports, + prepare_test!( + multiple_memory_imports, r#" (module (import "env" "memory" (memory 1 1)) @@ -751,7 +732,8 @@ mod tests { Err("Module is not valid") ); - prepare_test!(table_import, + prepare_test!( + table_import, r#" (module (import "seal0" "table" (table 1 anyfunc)) @@ -763,7 +745,8 @@ mod tests { Err("Cannot import tables") ); - prepare_test!(global_import, + prepare_test!( + global_import, r#" (module (global $g (import "seal0" "global") i32) @@ -778,7 +761,8 @@ mod tests { mod tables { use super::*; - prepare_test!(no_tables, + prepare_test!( + no_tables, r#" (module (func (export "call")) @@ -788,7 +772,8 @@ mod tests { Ok(_) ); - prepare_test!(table_valid_size, + prepare_test!( + table_valid_size, r#" (module (table 3 funcref) @@ -800,7 +785,8 @@ mod tests { Ok(_) ); - prepare_test!(table_too_big, + prepare_test!( + table_too_big, r#" (module (table 4 funcref) @@ -811,7 +797,8 @@ mod tests { Err("table exceeds maximum size allowed") ); - prepare_test!(br_table_valid_size, + prepare_test!( + br_table_valid_size, r#" (module (func (export "call")) @@ -825,7 +812,8 @@ mod tests { Ok(_) ); - prepare_test!(br_table_too_big, + prepare_test!( + br_table_too_big, r#" (module (func (export "call")) @@ -842,7 +830,8 @@ mod tests { mod imports { use super::*; - prepare_test!(can_import_legit_function, + prepare_test!( + can_import_legit_function, r#" (module (import "seal0" "nop" (func (param i64))) @@ -856,7 +845,8 @@ mod tests { // even though gas is defined the contract can't import it since // it is an implementation defined. 
- prepare_test!(can_not_import_gas_function, + prepare_test!( + can_not_import_gas_function, r#" (module (import "seal0" "gas" (func (param i32))) @@ -869,7 +859,8 @@ mod tests { ); // memory is in "env" and not in "seal0" - prepare_test!(memory_not_in_seal0, + prepare_test!( + memory_not_in_seal0, r#" (module (import "seal0" "memory" (memory 1 1)) @@ -882,7 +873,8 @@ mod tests { ); // memory is in "env" and not in some arbitrary module - prepare_test!(memory_not_in_arbitrary_module, + prepare_test!( + memory_not_in_arbitrary_module, r#" (module (import "any_module" "memory" (memory 1 1)) @@ -894,7 +886,8 @@ mod tests { Err("Invalid module for imported memory") ); - prepare_test!(function_in_other_module_works, + prepare_test!( + function_in_other_module_works, r#" (module (import "seal1" "nop" (func (param i32))) @@ -907,7 +900,8 @@ mod tests { ); // wrong signature - prepare_test!(wrong_signature, + prepare_test!( + wrong_signature, r#" (module (import "seal0" "gas" (func (param i64))) @@ -919,7 +913,8 @@ mod tests { Err("module imports a non-existent function") ); - prepare_test!(unknown_func_name, + prepare_test!( + unknown_func_name, r#" (module (import "seal0" "unknown_func" (func)) @@ -935,7 +930,8 @@ mod tests { mod entrypoints { use super::*; - prepare_test!(it_works, + prepare_test!( + it_works, r#" (module (func (export "call")) @@ -945,7 +941,8 @@ mod tests { Ok(_) ); - prepare_test!(omit_deploy, + prepare_test!( + omit_deploy, r#" (module (func (export "call")) @@ -954,7 +951,8 @@ mod tests { Err("deploy function isn't exported") ); - prepare_test!(omit_call, + prepare_test!( + omit_call, r#" (module (func (export "deploy")) @@ -964,7 +962,8 @@ mod tests { ); // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_entrypoint, + prepare_test!( + try_sneak_export_as_entrypoint, r#" (module (import "seal0" "panic" (func)) @@ -978,7 +977,8 @@ mod tests { ); // Try to use imported function as an entry point. 
- prepare_test!(try_sneak_export_as_global, + prepare_test!( + try_sneak_export_as_global, r#" (module (func (export "deploy")) @@ -988,7 +988,8 @@ mod tests { Err("expected a function") ); - prepare_test!(wrong_signature, + prepare_test!( + wrong_signature, r#" (module (func (export "deploy")) @@ -998,7 +999,8 @@ mod tests { Err("entry point has wrong signature") ); - prepare_test!(unknown_exports, + prepare_test!( + unknown_exports, r#" (module (func (export "call")) @@ -1009,7 +1011,8 @@ mod tests { Err("unknown export: expecting only deploy and call functions") ); - prepare_test!(global_float, + prepare_test!( + global_float, r#" (module (global $x f32 (f32.const 0)) @@ -1020,7 +1023,8 @@ mod tests { Err("use of floating point type in globals is forbidden") ); - prepare_test!(local_float, + prepare_test!( + local_float, r#" (module (func $foo (local f32)) @@ -1031,7 +1035,8 @@ mod tests { Err("use of floating point type in locals is forbidden") ); - prepare_test!(param_float, + prepare_test!( + param_float, r#" (module (func $foo (param f32)) @@ -1042,7 +1047,8 @@ mod tests { Err("use of floating point type in function types is forbidden") ); - prepare_test!(result_float, + prepare_test!( + result_float, r#" (module (func $foo (result f32) (f32.const 0)) diff --git a/substrate/frame/contracts/src/wasm/runtime.rs b/substrate/frame/contracts/src/wasm/runtime.rs index 7b6004a84f06b35eb8af3e9d34032aeb6c1b4a94..c04f25766dc71982f1e6cb861c1179624fe54e91 100644 --- a/substrate/frame/contracts/src/wasm/runtime.rs +++ b/substrate/frame/contracts/src/wasm/runtime.rs @@ -18,25 +18,20 @@ //! Environment definition of the wasm smart-contract runtime. use crate::{ - Config, CodeHash, BalanceOf, Error, - exec::{Ext, StorageKey, TopicOf, ExecResult, ExecError}, - gas::{Token, ChargedAmount}, - wasm::env_def::ConvertibleToWasm, + exec::{ExecError, ExecResult, Ext, StorageKey, TopicOf}, + gas::{ChargedAmount, Token}, schedule::HostFnWeights, + wasm::env_def::ConvertibleToWasm, + BalanceOf, CodeHash, Config, Error, }; use bitflags::bitflags; -use pwasm_utils::parity_wasm::elements::ValueType; -use frame_support::{dispatch::DispatchError, ensure, weights::Weight}; -use sp_std::prelude::*; use codec::{Decode, DecodeAll, Encode, MaxEncodedLen}; -use sp_core::{Bytes, crypto::UncheckedFrom}; -use sp_io::hashing::{ - keccak_256, - blake2_256, - blake2_128, - sha2_256, -}; +use frame_support::{dispatch::DispatchError, ensure, weights::Weight}; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; +use pwasm_utils::parity_wasm::elements::ValueType; +use sp_core::{crypto::UncheckedFrom, Bytes}; +use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; +use sp_std::prelude::*; /// Every error that can be returned to a contract when it calls any of the host functions. /// @@ -178,7 +173,7 @@ pub enum RuntimeCosts { /// Weight of calling `seal_random`. It includes the weight for copying the subject. Random, /// Weight of calling `seal_deposit_event` with the given number of topics and event size. - DepositEvent{num_topic: u32, len: u32}, + DepositEvent { num_topic: u32, len: u32 }, /// Weight of calling `seal_debug_message`. #[cfg(feature = "unstable-interface")] DebugMessage, @@ -203,7 +198,7 @@ pub enum RuntimeCosts { /// Weight of calling `seal_instantiate` for the given input and salt without output weight. /// This includes the transfer as an instantiate without a value will always be below /// the existential deposit and is disregarded as corner case. 
- InstantiateBase{input_data_len: u32, salt_len: u32}, + InstantiateBase { input_data_len: u32, salt_len: u32 }, /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. @@ -228,7 +223,7 @@ impl RuntimeCosts { fn token(&self, s: &HostFnWeights) -> RuntimeToken where T: Config, - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { use self::RuntimeCosts::*; let weight = match *self { @@ -246,40 +241,44 @@ impl RuntimeCosts { WeightToFee => s.weight_to_fee, InputBase => s.input, InputCopyOut(len) => s.input_per_byte.saturating_mul(len.into()), - Return(len) => s.r#return - .saturating_add(s.return_per_byte.saturating_mul(len.into())), + Return(len) => s.r#return.saturating_add(s.return_per_byte.saturating_mul(len.into())), Terminate => s.terminate, - RestoreTo(delta) => s.restore_to - .saturating_add(s.restore_to_per_delta.saturating_mul(delta.into())), + RestoreTo(delta) => + s.restore_to.saturating_add(s.restore_to_per_delta.saturating_mul(delta.into())), Random => s.random, - DepositEvent{num_topic, len} => s.deposit_event + DepositEvent { num_topic, len } => s + .deposit_event .saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into())) .saturating_add(s.deposit_event_per_byte.saturating_mul(len.into())), #[cfg(feature = "unstable-interface")] DebugMessage => s.debug_message, SetRentAllowance => s.set_rent_allowance, - SetStorage(len) => s.set_storage - .saturating_add(s.set_storage_per_byte.saturating_mul(len.into())), + SetStorage(len) => + s.set_storage.saturating_add(s.set_storage_per_byte.saturating_mul(len.into())), ClearStorage => s.clear_storage, GetStorageBase => s.get_storage, GetStorageCopyOut(len) => s.get_storage_per_byte.saturating_mul(len.into()), Transfer => s.transfer, - CallBase(len) => s.call - .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), + CallBase(len) => + s.call.saturating_add(s.call_per_input_byte.saturating_mul(len.into())), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), - InstantiateBase{input_data_len, salt_len} => s.instantiate + InstantiateBase { input_data_len, salt_len } => s + .instantiate .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), - InstantiateCopyOut(len) => s.instantiate_per_output_byte - .saturating_mul(len.into()), - HashSha256(len) => s.hash_sha2_256 + InstantiateCopyOut(len) => s.instantiate_per_output_byte.saturating_mul(len.into()), + HashSha256(len) => s + .hash_sha2_256 .saturating_add(s.hash_sha2_256_per_byte.saturating_mul(len.into())), - HashKeccak256(len) => s.hash_keccak_256 + HashKeccak256(len) => s + .hash_keccak_256 .saturating_add(s.hash_keccak_256_per_byte.saturating_mul(len.into())), - HashBlake256(len) => s.hash_blake2_256 + HashBlake256(len) => s + .hash_blake2_256 .saturating_add(s.hash_blake2_256_per_byte.saturating_mul(len.into())), - HashBlake128(len) => s.hash_blake2_128 + HashBlake128(len) => s + .hash_blake2_128 .saturating_add(s.hash_blake2_128_per_byte.saturating_mul(len.into())), ChainExtension(amount) => amount, #[cfg(feature = "unstable-interface")] @@ -306,7 +305,7 @@ struct RuntimeToken { impl Token for RuntimeToken where T: Config, - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { fn weight(&self) -> 
Weight { self.weight @@ -373,19 +372,10 @@ impl<'a, E> Runtime<'a, E> where E: Ext + 'a, ::AccountId: - UncheckedFrom<::Hash> + AsRef<[u8]> + UncheckedFrom<::Hash> + AsRef<[u8]>, { - pub fn new( - ext: &'a mut E, - input_data: Vec, - memory: sp_sandbox::Memory, - ) -> Self { - Runtime { - ext, - input_data: Some(input_data), - memory, - trap_reason: None, - } + pub fn new(ext: &'a mut E, input_data: Vec, memory: sp_sandbox::Memory) -> Self { + Runtime { ext, input_data: Some(input_data), memory, trap_reason: None } } /// Converts the sandbox result and the runtime state into the execution outcome. @@ -401,27 +391,15 @@ where if let Some(trap_reason) = self.trap_reason { return match trap_reason { // The trap was the result of the execution `return` host function. - TrapReason::Return(ReturnData{ flags, data }) => { - let flags = ReturnFlags::from_bits(flags).ok_or_else(|| - "used reserved bit in return flags" - )?; - Ok(ExecReturnValue { - flags, - data: Bytes(data), - }) - }, - TrapReason::Termination => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes(Vec::new()), - }) - }, - TrapReason::Restoration => { - Ok(ExecReturnValue { - flags: ReturnFlags::empty(), - data: Bytes(Vec::new()), - }) + TrapReason::Return(ReturnData { flags, data }) => { + let flags = ReturnFlags::from_bits(flags) + .ok_or_else(|| "used reserved bit in return flags")?; + Ok(ExecReturnValue { flags, data: Bytes(data) }) }, + TrapReason::Termination => + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), + TrapReason::Restoration => + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), TrapReason::SupervisorError(error) => Err(error)?, } } @@ -429,9 +407,7 @@ where // Check the exact type of the error. match sandbox_result { // No traps were generated. Proceed normally. - Ok(_) => { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) - } + Ok(_) => Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }), // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). // This shouldn't happen because validation process ought to reject such binaries. @@ -441,7 +417,7 @@ where Err(sp_sandbox::Error::Module) => Err("validation error")?, // Any other kind of a trap should result in a failure. Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(Error::::ContractTrapped)? + Err(Error::::ContractTrapped)?, } } @@ -484,12 +460,11 @@ where /// Returns `Err` if one of the following conditions occurs: /// /// - requested buffer is not within the bounds of the sandbox memory. - pub fn read_sandbox_memory(&self, ptr: u32, len: u32) - -> Result, DispatchError> - { + pub fn read_sandbox_memory(&self, ptr: u32, len: u32) -> Result, DispatchError> { ensure!(len <= self.ext.schedule().limits.max_memory_size(), Error::::OutOfBounds); let mut buf = vec![0u8; len as usize]; - self.memory.get(ptr, buf.as_mut_slice()) + self.memory + .get(ptr, buf.as_mut_slice()) .map_err(|_| Error::::OutOfBounds)?; Ok(buf) } @@ -499,9 +474,11 @@ where /// Returns `Err` if one of the following conditions occurs: /// /// - requested buffer is not within the bounds of the sandbox memory. 
- pub fn read_sandbox_memory_into_buf(&self, ptr: u32, buf: &mut [u8]) - -> Result<(), DispatchError> - { + pub fn read_sandbox_memory_into_buf( + &self, + ptr: u32, + buf: &mut [u8], + ) -> Result<(), DispatchError> { self.memory.get(ptr, buf).map_err(|_| Error::<E::T>::OutOfBounds.into()) } @@ -511,9 +488,10 @@ where /// /// The weight of reading a fixed value is included in the overall weight of any /// contract callable function. - pub fn read_sandbox_memory_as<D: Decode + MaxEncodedLen>(&self, ptr: u32) - -> Result<D, DispatchError> - { + pub fn read_sandbox_memory_as<D: Decode + MaxEncodedLen>( + &self, + ptr: u32, + ) -> Result<D, DispatchError> { let buf = self.read_sandbox_memory(ptr, D::max_encoded_len() as u32)?; let decoded = D::decode_all(&mut &buf[..]) .map_err(|_| DispatchError::from(Error::<E::T>::DecodingFailed))?; @@ -531,9 +509,11 @@ where /// /// There must be an extra benchmark for determining the influence of `len` with /// regard to the overall weight. - pub fn read_sandbox_memory_as_unbounded<D: Decode>(&self, ptr: u32, len: u32) - -> Result<D, DispatchError> - { + pub fn read_sandbox_memory_as_unbounded<D: Decode>( + &self, + ptr: u32, + len: u32, + ) -> Result<D, DispatchError> { let buf = self.read_sandbox_memory(ptr, len)?; let decoded = D::decode_all(&mut &buf[..]) .map_err(|_| DispatchError::from(Error::<E::T>::DecodingFailed))?; @@ -566,10 +546,9 @@ where buf: &[u8], allow_skip: bool, create_token: impl FnOnce(u32) -> Option<RuntimeCosts>, - ) -> Result<(), DispatchError> - { + ) -> Result<(), DispatchError> { if allow_skip && out_ptr == u32::MAX { - return Ok(()); + return Ok(()) } let buf_len = buf.len() as u32; @@ -583,10 +562,10 @@ where self.charge_gas(costs)?; } - self.memory.set(out_ptr, buf).and_then(|_| { - self.memory.set(out_len_ptr, &buf_len.encode()) - }) - .map_err(|_| Error::<E::T>::OutOfBounds)?; + self.memory + .set(out_ptr, buf) + .and_then(|_| self.memory.set(out_len_ptr, &buf_len.encode())) + .map_err(|_| Error::<E::T>::OutOfBounds)?; Ok(()) } @@ -650,7 +629,7 @@ where x if x == not_funded => Ok(NewContractNotFunded), x if x == no_code => Ok(CodeNotFound), x if (x == not_found || x == is_tombstone || x == rent_not_paid) => Ok(NotCallable), - err => Err(err) + err => Err(err), } } @@ -665,7 +644,7 @@ where match (error, origin) { (_, Callee) => Ok(ReturnCode::CalleeTrapped), - (err, _) => Self::err_into_return_code(err) + (err, _) => Self::err_into_return_code(err), } } @@ -678,9 +657,8 @@ where input_data_ptr: u32, input_data_len: u32, output_ptr: u32, - output_len_ptr: u32 - ) -> Result<ReturnCode, TrapReason> - { + output_len_ptr: u32, + ) -> Result<ReturnCode, TrapReason> { self.charge_gas(RuntimeCosts::CallBase(input_data_len))?; let callee: <<E as Ext>::T as frame_system::Config>::AccountId = self.read_sandbox_memory_as(callee_ptr)?; @@ -696,9 +674,8 @@ where self.charge_gas(RuntimeCosts::CallSurchargeTransfer)?; } let ext = &mut self.ext; - let call_outcome = ext.call( - gas, callee, value, input_data, flags.contains(CallFlags::ALLOW_REENTRY), - ); + let call_outcome = + ext.call(gas, callee, value, input_data, flags.contains(CallFlags::ALLOW_REENTRY)); // `TAIL_CALL` only matters on an `OK` result. Otherwise the call stack comes to // a halt anyway without any more code being executed. 
@@ -707,7 +684,7 @@ where return Err(TrapReason::Return(ReturnData { flags: return_value.flags.bits(), data: return_value.data.0, - })); + })) } } @@ -731,10 +708,9 @@ where output_ptr: u32, output_len_ptr: u32, salt_ptr: u32, - salt_len: u32 - ) -> Result - { - self.charge_gas(RuntimeCosts::InstantiateBase {input_data_len, salt_len})?; + salt_len: u32, + ) -> Result { + self.charge_gas(RuntimeCosts::InstantiateBase { input_data_len, salt_len })?; let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(code_hash_ptr)?; let value: BalanceOf<::T> = self.read_sandbox_memory_as(value_ptr)?; let input_data = self.read_sandbox_memory(input_data_ptr, input_data_len)?; @@ -743,7 +719,11 @@ where if let Ok((address, output)) = &instantiate_outcome { if !output.flags.contains(ReturnFlags::REVERT) { self.write_sandbox_output( - address_ptr, address_len_ptr, &address.encode(), true, already_charged, + address_ptr, + address_len_ptr, + &address.encode(), + true, + already_charged, )?; } self.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { @@ -767,13 +747,12 @@ where code_hash_ptr: u32, rent_allowance_ptr: u32, delta_ptr: u32, - delta_count: u32 + delta_count: u32, ) -> Result<(), TrapReason> { self.charge_gas(RuntimeCosts::RestoreTo(delta_count))?; let dest: <::T as frame_system::Config>::AccountId = self.read_sandbox_memory_as(dest_ptr)?; - let code_hash: CodeHash<::T> = - self.read_sandbox_memory_as(code_hash_ptr)?; + let code_hash: CodeHash<::T> = self.read_sandbox_memory_as(code_hash_ptr)?; let rent_allowance: BalanceOf<::T> = self.read_sandbox_memory_as(rent_allowance_ptr)?; let delta = { diff --git a/substrate/frame/contracts/src/weights.rs b/substrate/frame/contracts/src/weights.rs index 503d952b110ed2f298c6aa88a26fa1b831d585da..390873949ab63c31e789ffa373856fc88f644188 100644 --- a/substrate/frame/contracts/src/weights.rs +++ b/substrate/frame/contracts/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/democracy/src/benchmarking.rs b/substrate/frame/democracy/src/benchmarking.rs index ef2c7de27ba59417357093732f753a718aa146c4..1c506461408d6f949a206df66b240d31dc73ecaf 100644 --- a/substrate/frame/democracy/src/benchmarking.rs +++ b/substrate/frame/democracy/src/benchmarking.rs @@ -19,13 +19,15 @@ use super::*; -use frame_benchmarking::{benchmarks, account, whitelist_account, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelist_account}; use frame_support::{ assert_noop, assert_ok, - traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, schedule::DispatchTime}, + traits::{ + schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, + }, }; -use frame_system::{RawOrigin, Pallet as System, self}; -use sp_runtime::traits::{Bounded, One, BadOrigin}; +use frame_system::{Pallet as System, RawOrigin}; +use sp_runtime::traits::{BadOrigin, Bounded, One}; use crate::Pallet as Democracy; @@ -49,11 +51,7 @@ fn add_proposal(n: u32) -> Result { let value = T::MinimumDeposit::get(); let proposal_hash: T::Hash = T::Hashing::hash_of(&n); - Democracy::::propose( - RawOrigin::Signed(other).into(), - proposal_hash, - value.into(), - )?; + Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value.into())?; Ok(proposal_hash) } @@ -76,20 +74,15 @@ fn add_referendum(n: u32) -> Result { 63, 
frame_system::RawOrigin::Root.into(), Call::enact_proposal(proposal_hash, referendum_index).into(), - ).map_err(|_| "failed to schedule named")?; + ) + .map_err(|_| "failed to schedule named")?; Ok(referendum_index) } fn account_vote(b: BalanceOf) -> AccountVote> { - let v = Vote { - aye: true, - conviction: Conviction::Locked1x, - }; - - AccountVote::Standard { - vote: v, - balance: b, - } + let v = Vote { aye: true, conviction: Conviction::Locked1x }; + + AccountVote::Standard { vote: v, balance: b } } benchmarks! { @@ -224,8 +217,8 @@ benchmarks! { // Place our proposal in the external queue, too. let hash = T::Hashing::hash_of(&0); assert_ok!( - Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) - ); + Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) + ); // Add a referendum of our proposal. let referendum_index = add_referendum::(0)?; @@ -237,9 +230,9 @@ benchmarks! { verify { // Referendum has been canceled assert_noop!( - Democracy::::referendum_status(referendum_index), - Error::::ReferendumInvalid - ); + Democracy::::referendum_status(referendum_index), + Error::::ReferendumInvalid + ); } // Worst case scenario, we external propose a previously blacklisted proposal @@ -785,9 +778,4 @@ benchmarks! { } } - -impl_benchmark_test_suite!( - Democracy, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/democracy/src/conviction.rs b/substrate/frame/democracy/src/conviction.rs index c2dff741a9c23802b55a9ba12d56511a135b6c05..6b77acfab5b003f8eb129c2c2794c8e4e769f9c7 100644 --- a/substrate/frame/democracy/src/conviction.rs +++ b/substrate/frame/democracy/src/conviction.rs @@ -17,10 +17,13 @@ //! The conviction datatype. -use sp_std::{result::Result, convert::TryFrom}; -use sp_runtime::{RuntimeDebug, traits::{Zero, Bounded, CheckedMul, CheckedDiv}}; -use codec::{Encode, Decode}; use crate::types::Delegations; +use codec::{Decode, Encode}; +use sp_runtime::{ + traits::{Bounded, CheckedDiv, CheckedMul, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, result::Result}; /// A value denoting the strength of conviction of a vote. #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] @@ -93,9 +96,10 @@ impl Conviction { } /// The votes of a voter of the given `balance` with our conviction. - pub fn votes< - B: From + Zero + Copy + CheckedMul + CheckedDiv + Bounded - >(self, capital: B) -> Delegations { + pub fn votes + Zero + Copy + CheckedMul + CheckedDiv + Bounded>( + self, + capital: B, + ) -> Delegations { let votes = match self { Conviction::None => capital.checked_div(&10u8.into()).unwrap_or_else(Zero::zero), x => capital.checked_mul(&u8::from(x).into()).unwrap_or_else(B::max_value), diff --git a/substrate/frame/democracy/src/lib.rs b/substrate/frame/democracy/src/lib.rs index 42b00b8682a41af677352e3dcddcc66522c2553b..65bc483d2e5b74ee78bc2cf407db306229769bb3 100644 --- a/substrate/frame/democracy/src/lib.rs +++ b/substrate/frame/democracy/src/lib.rs @@ -121,7 +121,7 @@ //! This call can only be made by the `ExternalMajorityOrigin`. //! //! - `external_propose_majority` - Schedules a proposal to become a majority-carries -//! referendum once it is legal for an externally proposed referendum. +//! referendum once it is legal for an externally proposed referendum. //! //! #### External Default Origin //! @@ -149,34 +149,36 @@ //! 
- `cancel_queued` - Cancels a proposal that is queued for enactment. //! - `clear_public_proposal` - Removes all public proposals. -#![recursion_limit="128"] +#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - DispatchResult, DispatchError, ArithmeticError, RuntimeDebug, - traits::{Zero, Hash, Dispatchable, Saturating, Bounded}, -}; -use codec::{Encode, Decode, Input}; +use codec::{Decode, Encode, Input}; use frame_support::{ - ensure, weights::Weight, + ensure, traits::{ - Currency, ReservableCurrency, LockableCurrency, WithdrawReasons, LockIdentifier, Get, - OnUnbalanced, BalanceStatus, schedule::{Named as ScheduleNamed, DispatchTime}, + schedule::{DispatchTime, Named as ScheduleNamed}, + BalanceStatus, Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, + ReservableCurrency, WithdrawReasons, }, + weights::Weight, }; +use sp_runtime::{ + traits::{Bounded, Dispatchable, Hash, Saturating, Zero}, + ArithmeticError, DispatchError, DispatchResult, RuntimeDebug, +}; +use sp_std::prelude::*; -mod vote_threshold; -mod vote; mod conviction; mod types; +mod vote; +mod vote_threshold; pub mod weights; -pub use weights::WeightInfo; -pub use vote_threshold::{Approved, VoteThreshold}; -pub use vote::{Vote, AccountVote, Voting}; pub use conviction::Conviction; -pub use types::{ReferendumInfo, ReferendumStatus, Tally, UnvoteScope, Delegations}; pub use pallet::*; +pub use types::{Delegations, ReferendumInfo, ReferendumStatus, Tally, UnvoteScope}; +pub use vote::{AccountVote, Vote, Voting}; +pub use vote_threshold::{Approved, VoteThreshold}; +pub use weights::WeightInfo; #[cfg(test)] mod tests; @@ -197,9 +199,11 @@ pub type PropIndex = u32; /// A referendum index. pub type ReferendumIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; #[derive(Clone, Encode, Decode, RuntimeDebug)] pub enum PreimageStatus { @@ -235,13 +239,16 @@ enum Releases { #[frame_support::pallet] pub mod pallet { - use sp_runtime::DispatchResult; + use super::*; use frame_support::{ - pallet_prelude::*, Parameter, - weights::{DispatchClass, Pays}, traits::EnsureOrigin, dispatch::DispatchResultWithPostInfo, + dispatch::DispatchResultWithPostInfo, + pallet_prelude::*, + traits::EnsureOrigin, + weights::{DispatchClass, Pays}, + Parameter, }; - use frame_system::{pallet_prelude::*, ensure_signed, ensure_root}; - use super::*; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + use sp_runtime::DispatchResult; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -249,12 +256,12 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + Sized { - type Proposal: Parameter + Dispatchable + From>; + type Proposal: Parameter + Dispatchable + From>; type Event: From> + IsType<::Event>; /// Currency type for this pallet. type Currency: ReservableCurrency - + LockableCurrency; + + LockableCurrency; /// The minimum period of locking and the period between a proposal being approved and enacted. 
/// @@ -323,7 +330,7 @@ pub mod pallet { /// /// The number of Vetoers for a proposal must be small, extrinsics are weighted according to /// [MAX_VETOERS](./const.MAX_VETOERS.html) - type VetoOrigin: EnsureOrigin<Self::Origin, Success=Self::AccountId>; + type VetoOrigin: EnsureOrigin<Self::Origin, Success = Self::AccountId>; /// Period in blocks where an external proposal may not be re-submitted after being vetoed. #[pallet::constant] @@ -334,7 +341,7 @@ pub mod pallet { type PreimageByteDeposit: Get<BalanceOf<Self>>; /// An origin that can provide a preimage using operational extrinsics. - type OperationalPreimageOrigin: EnsureOrigin<Self::Origin, Success=Self::AccountId>; + type OperationalPreimageOrigin: EnsureOrigin<Self::Origin, Success = Self::AccountId>; /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced<NegativeImbalanceOf<Self>>; @@ -370,18 +377,16 @@ pub mod pallet { /// The public proposals. Unsorted. The second item is the proposal's hash. #[pallet::storage] #[pallet::getter(fn public_props)] - pub type PublicProps<T: Config> = StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; + pub type PublicProps<T: Config> = + StorageValue<_, Vec<(PropIndex, T::Hash, T::AccountId)>, ValueQuery>; /// Those who have locked a deposit. /// /// TWOX-NOTE: Safe, as increasing integer keys are safe. #[pallet::storage] #[pallet::getter(fn deposit_of)] - pub type DepositOf<T: Config> = StorageMap< - _, - Twox64Concat, PropIndex, - (Vec<T::AccountId>, BalanceOf<T>), - >; + pub type DepositOf<T: Config> = + StorageMap<_, Twox64Concat, PropIndex, (Vec<T::AccountId>, BalanceOf<T>)>; /// Map of hashes to the proposal preimage, along with who registered it and their deposit. /// The block number is the block at which it was deposited. @@ -390,7 +395,8 @@ pub mod pallet { #[pallet::storage] pub type Preimages<T: Config> = StorageMap< _, - Identity, T::Hash, + Identity, + T::Hash, PreimageStatus<T::AccountId, BalanceOf<T>, T::BlockNumber>, >; @@ -412,7 +418,8 @@ pub mod pallet { #[pallet::getter(fn referendum_info)] pub type ReferendumInfoOf<T: Config> = StorageMap< _, - Twox64Concat, ReferendumIndex, + Twox64Concat, + ReferendumIndex, ReferendumInfo<T::BlockNumber, T::Hash, BalanceOf<T>>, >; @@ -422,7 +429,9 @@ pub mod pallet { /// TWOX-NOTE: SAFE as `AccountId`s are crypto hashes anyway. #[pallet::storage] pub type VotingOf<T: Config> = StorageMap< - _, Twox64Concat, T::AccountId, + _, + Twox64Concat, + T::AccountId, Voting<BalanceOf<T>, T::AccountId, T::BlockNumber>, ValueQuery, >; @@ -452,7 +461,8 @@ pub mod pallet { /// A record of who vetoed what. Maps proposal hash to a possible existent block number /// (until when it may not be resubmitted) and who vetoed it. #[pallet::storage] - pub type Blacklist<T: Config> = StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec<T::AccountId>)>; + pub type Blacklist<T: Config> = + StorageMap<_, Identity, T::Hash, (T::BlockNumber, Vec<T::AccountId>)>; /// Record of all proposals that have been subject to emergency cancellation. #[pallet::storage] @@ -472,9 +482,7 @@ pub mod pallet { #[cfg(feature = "std")] impl<T: Config> Default for GenesisConfig<T> { fn default() -> Self { - GenesisConfig { - _phantom: Default::default(), - } + GenesisConfig { _phantom: Default::default() } } } @@ -684,11 +692,10 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; - let seconds = Self::len_of_deposit_of(proposal) - .ok_or_else(|| Error::<T>::ProposalMissing)?; + let seconds = + Self::len_of_deposit_of(proposal).ok_or_else(|| Error::<T>::ProposalMissing)?; ensure!(seconds <= seconds_upper_bound, Error::<T>::WrongUpperBound); - let mut deposit = Self::deposit_of(proposal) - .ok_or(Error::<T>::ProposalMissing)?; + let mut deposit = Self::deposit_of(proposal).ok_or(Error::<T>::ProposalMissing)?; T::Currency::reserve(&who, deposit.1)?; deposit.0.push(who); <DepositOf<T>>::insert(proposal, deposit); @@ -726,7 +733,10 @@ pub mod pallet { /// /// Weight: `O(1)`.
#[pallet::weight((T::WeightInfo::emergency_cancel(), DispatchClass::Operational))] - pub fn emergency_cancel(origin: OriginFor<T>, ref_index: ReferendumIndex) -> DispatchResult { + pub fn emergency_cancel( + origin: OriginFor<T>, + ref_index: ReferendumIndex, + ) -> DispatchResult { T::CancellationOrigin::ensure_origin(origin)?; let status = Self::referendum_status(ref_index)?; @@ -842,8 +852,8 @@ pub mod pallet { ensure!(T::InstantAllowed::get(), Error::<T>::InstantNotAllowed); } - let (e_proposal_hash, threshold) = <NextExternal<T>>::get() - .ok_or(Error::<T>::ProposalMissing)?; + let (e_proposal_hash, threshold) = + <NextExternal<T>>::get().ok_or(Error::<T>::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::<T>::NotSimpleMajority, ); @@ -875,11 +885,10 @@ pub mod pallet { Err(Error::<T>::NoProposal)?; } - let mut existing_vetoers = <Blacklist<T>>::get(&proposal_hash) - .map(|pair| pair.1) - .unwrap_or_else(Vec::new); - let insert_position = existing_vetoers.binary_search(&who) - .err().ok_or(Error::<T>::AlreadyVetoed)?; + let mut existing_vetoers = + <Blacklist<T>>::get(&proposal_hash).map(|pair| pair.1).unwrap_or_else(Vec::new); + let insert_position = + existing_vetoers.binary_search(&who).err().ok_or(Error::<T>::AlreadyVetoed)?; existing_vetoers.insert(insert_position, who.clone()); let until = <frame_system::Pallet<T>>::block_number() + T::CooloffPeriod::get(); @@ -949,7 +958,7 @@ pub mod pallet { origin: OriginFor<T>, to: T::AccountId, conviction: Conviction, - balance: BalanceOf<T> + balance: BalanceOf<T>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let votes = Self::try_delegate(who, to, conviction, balance)?; @@ -1089,10 +1098,11 @@ pub mod pallet { let (provider, deposit, since, expiry) = <Preimages<T>>::get(&proposal_hash) .and_then(|m| match m { - PreimageStatus::Available { provider, deposit, since, expiry, .. } - => Some((provider, deposit, since, expiry)), + PreimageStatus::Available { provider, deposit, since, expiry, .. } => + Some((provider, deposit, since, expiry)), _ => None, - }).ok_or(Error::<T>::PreimageMissing)?; + }) + .ok_or(Error::<T>::PreimageMissing)?; let now = <frame_system::Pallet<T>>::block_number(); let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); @@ -1100,7 +1110,8 @@ pub mod pallet { ensure!(now >= since + voting + additional, Error::<T>::TooEarly); ensure!(expiry.map_or(true, |e| now > e), Error::<T>::Imminent); - let res = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + let res = + T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); debug_assert!(res.is_ok()); <Preimages<T>>::remove(&proposal_hash); Self::deposit_event(Event::<T>::PreimageReaped(proposal_hash, provider, deposit, who)); @@ -1211,7 +1222,8 @@ pub mod pallet { /// Weight: `O(p)` (though as this is a high-privilege dispatch, we assume it has a /// reasonable value). #[pallet::weight((T::WeightInfo::blacklist(T::MaxProposals::get()), DispatchClass::Operational))] - pub fn blacklist(origin: OriginFor<T>, + pub fn blacklist( + origin: OriginFor<T>, proposal_hash: T::Hash, maybe_ref_index: Option<ReferendumIndex>, ) -> DispatchResult { @@ -1288,7 +1300,7 @@ impl<T: Config> Pallet<T> { /// Get all referenda ready for tally at block `n`.
pub fn maturing_referenda_at( - n: T::BlockNumber + n: T::BlockNumber, ) -> Vec<(ReferendumIndex, ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); @@ -1299,7 +1311,8 @@ impl<T: Config> Pallet<T> { n: T::BlockNumber, range: core::ops::Range<ReferendumIndex>, ) -> Vec<(ReferendumIndex, ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>)> { - range.into_iter() + range + .into_iter() .map(|i| (i, Self::referendum_info(i))) .filter_map(|(i, maybe_info)| match maybe_info { Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), @@ -1315,13 +1328,13 @@ impl<T: Config> Pallet<T> { pub fn internal_start_referendum( proposal_hash: T::Hash, threshold: VoteThreshold, - delay: T::BlockNumber + delay: T::BlockNumber, ) -> ReferendumIndex { <Pallet<T>>::inject_referendum( <frame_system::Pallet<T>>::block_number() + T::VotingPeriod::get(), proposal_hash, threshold, - delay + delay, ) } @@ -1334,25 +1347,28 @@ impl<T: Config> Pallet<T> { // private. /// Ok if the given referendum is active, Err otherwise - fn ensure_ongoing(r: ReferendumInfo<T::BlockNumber, T::Hash, BalanceOf<T>>) - -> Result<ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>, DispatchError> - { + fn ensure_ongoing( + r: ReferendumInfo<T::BlockNumber, T::Hash, BalanceOf<T>>, + ) -> Result<ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>, DispatchError> { match r { ReferendumInfo::Ongoing(s) => Ok(s), _ => Err(Error::<T>::ReferendumInvalid.into()), } } - fn referendum_status(ref_index: ReferendumIndex) - -> Result<ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>, DispatchError> - { - let info = ReferendumInfoOf::<T>::get(ref_index) - .ok_or(Error::<T>::ReferendumInvalid)?; + fn referendum_status( + ref_index: ReferendumIndex, + ) -> Result<ReferendumStatus<T::BlockNumber, T::Hash, BalanceOf<T>>, DispatchError> { + let info = ReferendumInfoOf::<T>::get(ref_index).ok_or(Error::<T>::ReferendumInvalid)?; Self::ensure_ongoing(info) } /// Actually enact a vote, if legit. - fn try_vote(who: &T::AccountId, ref_index: ReferendumIndex, vote: AccountVote<BalanceOf<T>>) -> DispatchResult { + fn try_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + vote: AccountVote<BalanceOf<T>>, + ) -> DispatchResult { let mut status = Self::referendum_status(ref_index)?; ensure!(vote.balance() <= T::Currency::free_balance(who), Error::<T>::InsufficientFunds); VotingOf::<T>::try_mutate(who, |voting| -> DispatchResult { @@ -1365,11 +1381,14 @@ impl<T: Config> Pallet<T> { status.tally.reduce(approve, *delegations); } votes[i].1 = vote; - } + }, Err(i) => { - ensure!(votes.len() as u32 <= T::MaxVotes::get(), Error::<T>::MaxVotesReached); + ensure!( + votes.len() as u32 <= T::MaxVotes::get(), + Error::<T>::MaxVotesReached + ); votes.insert(i, (ref_index, vote)); - } + }, } // Shouldn't be possible to fail, but we handle it gracefully. status.tally.add(vote).ok_or(ArithmeticError::Overflow)?; @@ -1383,12 +1402,7 @@ impl<T: Config> Pallet<T> { })?; // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place. - T::Currency::extend_lock( - DEMOCRACY_ID, - who, - vote.balance(), - WithdrawReasons::TRANSFER - ); + T::Currency::extend_lock(DEMOCRACY_ID, who, vote.balance(), WithdrawReasons::TRANSFER); ReferendumInfoOf::<T>::insert(ref_index, ReferendumInfo::Ongoing(status)); Ok(()) } @@ -1399,11 +1413,17 @@ impl<T: Config> Pallet<T> { /// - The referendum has finished and the voter's lock period is up. /// /// This will generally be combined with a call to `unlock`.
- fn try_remove_vote(who: &T::AccountId, ref_index: ReferendumIndex, scope: UnvoteScope) -> DispatchResult { + fn try_remove_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + scope: UnvoteScope, + ) -> DispatchResult { let info = ReferendumInfoOf::<T>::get(ref_index); VotingOf::<T>::try_mutate(who, |voting| -> DispatchResult { if let Voting::Direct { ref mut votes, delegations, ref mut prior } = voting { - let i = votes.binary_search_by_key(&ref_index, |i| i.0).map_err(|_| Error::<T>::NotVoter)?; + let i = votes + .binary_search_by_key(&ref_index, |i| i.0) + .map_err(|_| Error::<T>::NotVoter)?; match info { Some(ReferendumInfo::Ongoing(mut status)) => { ensure!(matches!(scope, UnvoteScope::Any), Error::<T>::NoPermission); @@ -1413,17 +1433,20 @@ impl<T: Config> Pallet<T> { status.tally.reduce(approve, *delegations); } ReferendumInfoOf::<T>::insert(ref_index, ReferendumInfo::Ongoing(status)); - } + }, - Some(ReferendumInfo::Finished{end, approved}) => + Some(ReferendumInfo::Finished { end, approved }) => if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); let now = frame_system::Pallet::<T>::block_number(); if now < unlock_at { - ensure!(matches!(scope, UnvoteScope::Any), Error::<T>::NoPermission); + ensure!( + matches!(scope, UnvoteScope::Any), + Error::<T>::NoPermission + ); prior.accumulate(unlock_at, balance) } }, - None => {} // Referendum was cancelled. + None => {}, // Referendum was cancelled. } votes.remove(i); } @@ -1444,15 +1467,15 @@ impl<T: Config> Pallet<T> { *delegations = delegations.saturating_add(amount); for &(ref_index, account_vote) in votes.iter() { if let AccountVote::Standard { vote, .. } = account_vote { - ReferendumInfoOf::<T>::mutate(ref_index, |maybe_info| + ReferendumInfoOf::<T>::mutate(ref_index, |maybe_info| { if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { status.tally.increase(vote.aye, amount); } - ); + }); } } votes.len() as u32 - } + }, }) } @@ -1463,20 +1486,20 @@ impl<T: Config> Pallet<T> { // We don't support second level delegating, so we don't need to do anything more. *delegations = delegations.saturating_sub(amount); 1 - } + }, Voting::Direct { votes, delegations, .. } => { *delegations = delegations.saturating_sub(amount); for &(ref_index, account_vote) in votes.iter() { if let AccountVote::Standard { vote, .. } = account_vote { - ReferendumInfoOf::<T>::mutate(ref_index, |maybe_info| + ReferendumInfoOf::<T>::mutate(ref_index, |maybe_info| { if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { status.tally.reduce(vote.aye, amount); } - ); + }); } } votes.len() as u32 - } + }, }) } @@ -1505,22 +1528,17 @@ impl<T: Config> Pallet<T> { // remove any delegation votes to our current target. Self::reduce_upstream_delegation(&target, conviction.votes(balance)); voting.set_common(delegations, prior); - } + }, Voting::Direct { votes, delegations, prior } => { // here we just ensure that we're currently idling with no votes recorded. ensure!(votes.is_empty(), Error::<T>::VotesExist); voting.set_common(delegations, prior); - } + }, } let votes = Self::increase_upstream_delegation(&target, conviction.votes(balance)); // Extend the lock to `balance` (rather than setting it) since we don't know what other // votes are in place.
- T::Currency::extend_lock( - DEMOCRACY_ID, - &who, - balance, - WithdrawReasons::TRANSFER - ); + T::Currency::extend_lock(DEMOCRACY_ID, &who, balance, WithdrawReasons::TRANSFER); Ok(votes) })?; Self::deposit_event(Event::<T>::Delegated(who, target)); @@ -1535,25 +1553,18 @@ impl<T: Config> Pallet<T> { let mut old = Voting::default(); sp_std::mem::swap(&mut old, voting); match old { - Voting::Delegating { - balance, - target, - conviction, - delegations, - mut prior, - } => { + Voting::Delegating { balance, target, conviction, delegations, mut prior } => { // remove any delegation votes to our current target. - let votes = Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + let votes = + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); let now = frame_system::Pallet::<T>::block_number(); let lock_periods = conviction.lock_periods().into(); prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); voting.set_common(delegations, prior); Ok(votes) - } - Voting::Direct { .. } => { - Err(Error::<T>::NotDelegating.into()) - } + }, + Voting::Direct { .. } => Err(Error::<T>::NotDelegating.into()), } })?; Self::deposit_event(Event::<T>::Undelegated(who)); @@ -1583,7 +1594,8 @@ impl<T: Config> Pallet<T> { ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::<T>::put(ref_index + 1); - let status = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; + let status = + ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; let item = ReferendumInfo::Ongoing(status); <ReferendumInfoOf<T>>::insert(ref_index, item); Self::deposit_event(Event::<T>::Started(ref_index, threshold)); @@ -1596,7 +1608,8 @@ impl<T: Config> Pallet<T> { Self::launch_public(now).or_else(|_| Self::launch_external(now)) } else { Self::launch_external(now).or_else(|_| Self::launch_public(now)) - }.map_err(|_| Error::<T>::NoneWaiting.into()) + } + .map_err(|_| Error::<T>::NoneWaiting.into()) } /// Table the waiting external proposal for a vote, if there is one. @@ -1654,8 +1667,10 @@ impl<T: Config> Pallet<T> { debug_assert!(err_amount.is_zero()); Self::deposit_event(Event::<T>::PreimageUsed(proposal_hash, provider, deposit)); - let res = proposal.dispatch(frame_system::RawOrigin::Root.into()) - .map(|_| ()).map_err(|e| e.error); + let res = proposal + .dispatch(frame_system::RawOrigin::Root.into()) + .map(|_| ()) + .map_err(|e| e.error); Self::deposit_event(Event::<T>::Executed(index, res)); Ok(()) @@ -1685,10 +1700,14 @@ impl<T: Config> Pallet<T> { } else { let when = now + status.delay; // Note that we need the preimage now. - Preimages::<T>::mutate_exists(&status.proposal_hash, |maybe_pre| match *maybe_pre { - Some(PreimageStatus::Available { ref mut expiry, .. }) => *expiry = Some(when), - ref mut a => *a = Some(PreimageStatus::Missing(when)), - }); + Preimages::<T>::mutate_exists( + &status.proposal_hash, + |maybe_pre| match *maybe_pre { + Some(PreimageStatus::Available { ref mut expiry, .. }) => + *expiry = Some(when), + ref mut a => *a = Some(PreimageStatus::Missing(when)), + }, + ); if T::Scheduler::schedule_named( (DEMOCRACY_ID, index).encode(), @@ -1697,7 +1716,9 @@ impl<T: Config> Pallet<T> { 63, frame_system::RawOrigin::Root.into(), Call::enact_proposal(status.proposal_hash, index).into(), - ).is_err() { + ) + .is_err() + { frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); } } @@ -1762,7 +1783,8 @@ impl<T: Config> Pallet<T> { // To decode the enum variant we only need the first byte.
let mut buf = [0u8; 1]; let key = <Preimages<T>>::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::NotImminent)?; + let bytes = + sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::NotImminent)?; // The value may be smaller than 1 byte. let mut input = &buf[0..buf.len().min(bytes as usize)]; @@ -1772,7 +1794,7 @@ impl<T: Config> Pallet<T> { _ => { sp_runtime::print("Failed to decode `PreimageStatus` variant"); Err(Error::<T>::NotImminent.into()) - } + }, } } @@ -1790,7 +1812,8 @@ impl<T: Config> Pallet<T> { // * at most 5 bytes to decode a `Compact` let mut buf = [0u8; 6]; let key = <Preimages<T>>::hashed_key_for(proposal_hash); - let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::PreimageMissing)?; + let bytes = + sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::<T>::PreimageMissing)?; // The value may be smaller than 6 bytes. let mut input = &buf[0..buf.len().min(bytes as usize)]; @@ -1799,15 +1822,17 @@ impl<T: Config> Pallet<T> { Ok(0) => return Err(Error::<T>::PreimageMissing.into()), _ => { sp_runtime::print("Failed to decode `PreimageStatus` variant"); - return Err(Error::<T>::PreimageMissing.into()); - } + return Err(Error::<T>::PreimageMissing.into()) + }, } // Decode the length of the vector. - let len = codec::Compact::<u32>::decode(&mut input).map_err(|_| { - sp_runtime::print("Failed to decode `PreimageStatus` variant"); - DispatchError::from(Error::<T>::PreimageMissing) - })?.0; + let len = codec::Compact::<u32>::decode(&mut input) + .map_err(|_| { + sp_runtime::print("Failed to decode `PreimageStatus` variant"); + DispatchError::from(Error::<T>::PreimageMissing) + })? + .0; Ok(len) } @@ -1837,7 +1862,10 @@ impl<T: Config> Pallet<T> { } // See `note_imminent_preimage` - fn note_imminent_preimage_inner(who: T::AccountId, encoded_proposal: Vec<u8>) -> DispatchResult { + fn note_imminent_preimage_inner( + who: T::AccountId, + encoded_proposal: Vec<u8>, + ) -> DispatchResult { let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); Self::check_pre_image_is_missing(proposal_hash)?; let status = Preimages::<T>::get(&proposal_hash).ok_or(Error::<T>::NotImminent)?; @@ -1873,6 +1901,6 @@ fn decode_compact_u32_at(key: &[u8]) -> Option<u32> { sp_runtime::print("Failed to decode compact u32 at:"); sp_runtime::print(key); None - } + }, } } diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 1c68715d49e3ef65fab0d5f5c9e7685af85f9149..64444304db673b9b97099ba488ddd994a2036f5a 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -17,23 +17,25 @@ //! The crate's tests.
-use crate as pallet_democracy; use super::*; +use crate as pallet_democracy; use codec::Encode; use frame_support::{ - assert_noop, assert_ok, parameter_types, ord_parameter_types, - traits::{SortedMembers, OnInitialize, Filter, GenesisBuild}, + assert_noop, assert_ok, ord_parameter_types, parameter_types, + traits::{Filter, GenesisBuild, OnInitialize, SortedMembers}, weights::Weight, }; +use frame_system::{EnsureRoot, EnsureSignedBy}; +use pallet_balances::{BalanceLock, Error as BalancesError}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, - testing::Header, Perbill, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, }; -use pallet_balances::{BalanceLock, Error as BalancesError}; -use frame_system::{EnsureSignedBy, EnsureRoot}; mod cancellation; +mod decoders; mod delegation; mod external_proposing; mod fast_tracking; @@ -42,7 +44,6 @@ mod preimage; mod public_proposals; mod scheduling; mod voting; -mod decoders; const AYE: Vote = Vote { aye: true, conviction: Conviction::None }; const NAY: Vote = Vote { aye: false, conviction: Conviction::None }; @@ -194,10 +195,14 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap(); - pallet_balances::GenesisConfig::<Test>{ + pallet_balances::GenesisConfig::<Test> { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); - pallet_democracy::GenesisConfig::<Test>::default().assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); + pallet_democracy::GenesisConfig::<Test>::default() + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -246,19 +251,11 @@ fn set_balance_proposal_hash_and_note(value: u64) -> H256 { } fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash(value), - delay, - ) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash(value), delay) } fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash_and_note(value), - delay, - ) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash_and_note(value), delay) } fn next_block() { diff --git a/substrate/frame/democracy/src/tests/cancellation.rs b/substrate/frame/democracy/src/tests/cancellation.rs index d48173a39d8326c0f3386ecdea9b6dbf7004e8b5..c2bd725ce934a49cab3ea31c0b038954dbbb68bc 100644 --- a/substrate/frame/democracy/src/tests/cancellation.rs +++ b/substrate/frame/democracy/src/tests/cancellation.rs @@ -26,7 +26,7 @@ fn cancel_referendum_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into())); @@ -67,7 +67,7 @@ fn emergency_cancel_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 2 + 2, ); assert!(Democracy::referendum_status(r).is_ok()); @@ -81,7 +81,7 @@ fn emergency_cancel_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 2 + 2, ); assert!(Democracy::referendum_status(r).is_ok()); assert_noop!( diff --git a/substrate/frame/democracy/src/tests/decoders.rs
b/substrate/frame/democracy/src/tests/decoders.rs index c3eb9ca7e3322245451adecc04915fed48b3768a..3c1729c4355c0067b386c22f045f08888f4fc3cc 100644 --- a/substrate/frame/democracy/src/tests/decoders.rs +++ b/substrate/frame/democracy/src/tests/decoders.rs @@ -66,7 +66,7 @@ fn pre_image() { assert_noop!(Democracy::check_pre_image_is_missing(key), Error::<Test>::NotImminent); for l in vec![0, 10, 100, 1000u32] { - let available = PreimageStatus::Available{ + let available = PreimageStatus::Available { data: (0..l).map(|i| i as u8).collect(), provider: 0, deposit: 0, @@ -76,8 +76,10 @@ fn pre_image() { Preimages::<Test>::insert(key, available); assert_eq!(Democracy::pre_image_data_len(key), Ok(l)); - assert_noop!(Democracy::check_pre_image_is_missing(key), - Error::<Test>::DuplicatePreimage); + assert_noop!( + Democracy::check_pre_image_is_missing(key), + Error::<Test>::DuplicatePreimage + ); } }) } diff --git a/substrate/frame/democracy/src/tests/external_proposing.rs b/substrate/frame/democracy/src/tests/external_proposing.rs index 37654a5e91462769cd04c7b935dac1685e1553b1..7442964584fa93f368fff219be9fbe96409ede2f 100644 --- a/substrate/frame/democracy/src/tests/external_proposing.rs +++ b/substrate/frame/democracy/src/tests/external_proposing.rs @@ -34,17 +34,17 @@ fn veto_external_works() { // cancelled. assert!(!<NextExternal<Test>>::exists()); // fails - same proposal can't be resubmitted. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::<Test>::ProposalBlacklisted); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::<Test>::ProposalBlacklisted + ); fast_forward_to(1); // fails as we're still in cooloff period. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::<Test>::ProposalBlacklisted); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::<Test>::ProposalBlacklisted + ); fast_forward_to(2); // works; as we're out of the cooloff period. @@ -67,10 +67,10 @@ fn veto_external_works() { fast_forward_to(3); // same proposal fails as we're still in cooloff - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::<Test>::ProposalBlacklisted); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::<Test>::ProposalBlacklisted + ); // different proposal works fine.
assert_ok!(Democracy::external_propose( Origin::signed(2), @@ -96,10 +96,7 @@ fn external_blacklisting_should_work() { assert_noop!(Democracy::referendum_status(0), Error::<Test>::ReferendumInvalid); assert_noop!( - Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - ), + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash_and_note(2),), Error::<Test>::ProposalBlacklisted, ); }); @@ -110,20 +107,17 @@ fn external_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose( - Origin::signed(1), - set_balance_proposal_hash(2), - ), + Democracy::external_propose(Origin::signed(1), set_balance_proposal_hash(2),), BadOrigin, ); assert_ok!(Democracy::external_propose( Origin::signed(2), set_balance_proposal_hash_and_note(2), )); - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(1), - ), Error::<Test>::DuplicateProposal); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(1),), + Error::<Test>::DuplicateProposal + ); fast_forward_to(2); assert_eq!( Democracy::referendum_status(0), @@ -143,10 +137,7 @@ fn external_majority_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_majority( - Origin::signed(1), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_majority(Origin::signed(1), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_majority( @@ -172,10 +163,7 @@ fn external_default_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); assert_noop!( - Democracy::external_propose_default( - Origin::signed(3), - set_balance_proposal_hash(2) - ), + Democracy::external_propose_default(Origin::signed(3), set_balance_proposal_hash(2)), BadOrigin, ); assert_ok!(Democracy::external_propose_default( @@ -196,7 +184,6 @@ fn external_default_referendum_works() { }); } - #[test] fn external_and_public_interleaving_works() { new_test_ext().execute_with(|| { @@ -222,9 +209,9 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); fast_forward_to(4); @@ -256,9 +243,9 @@ fn external_and_public_interleaving_works() { ); // replenish external assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(5), - )); + Origin::signed(2), + set_balance_proposal_hash_and_note(5), + )); fast_forward_to(8); diff --git a/substrate/frame/democracy/src/tests/fast_tracking.rs b/substrate/frame/democracy/src/tests/fast_tracking.rs index d01dafaa762baa2a5a19946067b6f3267a852cb1..9b2f2760bde1cbc35437ddc28fcd6255a5e2cc25 100644 --- a/substrate/frame/democracy/src/tests/fast_tracking.rs +++ b/substrate/frame/democracy/src/tests/fast_tracking.rs @@ -24,7 +24,10 @@ fn fast_track_referendum_works() { new_test_ext().execute_with(|| { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::<Test>::ProposalMissing); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::<Test>::ProposalMissing + ); assert_ok!(Democracy::external_propose_majority( Origin::signed(3), set_balance_proposal_hash_and_note(2) @@ -49,14 +52,20 @@ fn instant_referendum_works() {
new_test_ext().execute_with(|| { System::set_block_number(0); let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::<Test>::ProposalMissing); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::<Test>::ProposalMissing + ); assert_ok!(Democracy::external_propose_majority( Origin::signed(3), set_balance_proposal_hash_and_note(2) )); assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); - assert_noop!(Democracy::fast_track(Origin::signed(6), h, 1, 0), Error::<Test>::InstantNotAllowed); + assert_noop!( + Democracy::fast_track(Origin::signed(6), h, 1, 0), + Error::<Test>::InstantNotAllowed + ); INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); assert_eq!( diff --git a/substrate/frame/democracy/src/tests/lock_voting.rs b/substrate/frame/democracy/src/tests/lock_voting.rs index 29cd24e1de60a0a1885653b3de4159423deb2d1d..c1a27400fe557a844cbb6b32610ca4e8b11394e0 100644 --- a/substrate/frame/democracy/src/tests/lock_voting.rs +++ b/substrate/frame/democracy/src/tests/lock_voting.rs @@ -23,23 +23,19 @@ use std::convert::TryFrom; fn aye(x: u8, balance: u64) -> AccountVote<u64> { AccountVote::Standard { vote: Vote { aye: true, conviction: Conviction::try_from(x).unwrap() }, - balance + balance, } } fn nay(x: u8, balance: u64) -> AccountVote<u64> { AccountVote::Standard { vote: Vote { aye: false, conviction: Conviction::try_from(x).unwrap() }, - balance + balance, } } fn the_lock(amount: u64) -> BalanceLock<u64> { - BalanceLock { - id: DEMOCRACY_ID, - amount, - reasons: pallet_balances::Reasons::Misc, - } + BalanceLock { id: DEMOCRACY_ID, amount, reasons: pallet_balances::Reasons::Misc } } #[test] @@ -50,7 +46,7 @@ fn lock_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); @@ -74,7 +70,10 @@ fn lock_voting_should_work() { assert_ok!(Democracy::unlock(Origin::signed(2), 5)); // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 2, r), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 2, r), + Error::<Test>::NoPermission + ); // However, they can be unvoted by the owner, though it will make no difference to the lock. assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); assert_ok!(Democracy::unlock(Origin::signed(2), 2)); @@ -86,10 +85,12 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(5), vec![]); assert_eq!(Balances::free_balance(42), 2); - fast_forward_to(5); // No change yet...
- assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 4, r), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 4, r), + Error::<Test>::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(1), 4)); assert_eq!(Balances::locks(4), vec![the_lock(40)]); fast_forward_to(6); @@ -99,7 +100,10 @@ fn lock_voting_should_work() { assert_eq!(Balances::locks(4), vec![]); fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 3, r), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 3, r), + Error::<Test>::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(1), 3)); assert_eq!(Balances::locks(3), vec![the_lock(30)]); fast_forward_to(10); @@ -145,7 +149,7 @@ fn lock_voting_should_work_with_delegation() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); @@ -168,7 +172,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); @@ -176,7 +180,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); @@ -184,7 +188,7 @@ fn setup_three_referenda() -> (u32, u32, u32) { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); @@ -202,7 +206,10 @@ fn prior_lockvotes_should_be_enforced() { // r.2 locked 50 until #6.
fast_forward_to(5); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.2), + Error::<Test>::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(50)]); fast_forward_to(6); @@ -210,7 +217,10 @@ fn prior_lockvotes_should_be_enforced() { assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.1), + Error::<Test>::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(20)]); fast_forward_to(10); @@ -218,7 +228,10 @@ fn prior_lockvotes_should_be_enforced() { assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(17); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0), Error::<Test>::NoPermission); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.0), + Error::<Test>::NoPermission + ); assert_ok!(Democracy::unlock(Origin::signed(5), 5)); assert_eq!(Balances::locks(5), vec![the_lock(10)]); fast_forward_to(18); @@ -296,7 +309,7 @@ fn locks_should_persist_from_voting_to_delegation() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SimpleMajority, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); fast_forward_to(2); diff --git a/substrate/frame/democracy/src/tests/preimage.rs b/substrate/frame/democracy/src/tests/preimage.rs index a412343299d9fc86848718824ec06adb967d844f..6d478fcaa68c7300075ba000cffcdf5a8ef78432 100644 --- a/substrate/frame/democracy/src/tests/preimage.rs +++ b/substrate/frame/democracy/src/tests/preimage.rs @@ -26,7 +26,7 @@ fn missing_preimage_should_fail() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); @@ -43,8 +43,11 @@ fn preimage_deposit_should_be_required_and_returned() { // fee of 100 is too much. PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); assert_noop!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) } - else { Democracy::note_preimage(Origin::signed(6), vec![0; 500]) }, + if operational { + Democracy::note_preimage_operational(Origin::signed(6), vec![0; 500]) + } else { + Democracy::note_preimage(Origin::signed(6), vec![0; 500]) + }, BalancesError::<Test, _>::InsufficientBalance, ); // fee of 1 is reasonable.
@@ -53,7 +56,7 @@ fn preimage_deposit_should_be_required_and_returned() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); @@ -72,10 +75,11 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { new_test_ext_execute_with_cond(|operational| { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) } - ); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); assert_eq!(Balances::reserved_balance(6), 12); @@ -85,7 +89,11 @@ fn preimage_deposit_should_be_reapable_earlier_by_owner() { Error::<Test>::TooEarly ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2), u32::MAX)); + assert_ok!(Democracy::reap_preimage( + Origin::signed(6), + set_balance_proposal_hash(2), + u32::MAX + )); assert_eq!(Balances::free_balance(6), 60); assert_eq!(Balances::reserved_balance(6), 0); @@ -96,27 +104,32 @@ fn preimage_deposit_should_be_reapable() { new_test_ext_execute_with_cond(|operational| { assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), - Error::<Test>::PreimageMissing - ); + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::<Test>::PreimageMissing + ); PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!( - if operational { Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) } - ); + assert_ok!(if operational { + Democracy::note_preimage_operational(Origin::signed(6), set_balance_proposal(2)) + } else { + Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2)) + }); assert_eq!(Balances::reserved_balance(6), 12); next_block(); next_block(); next_block(); assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), - Error::<Test>::TooEarly - ); + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX), + Error::<Test>::TooEarly + ); next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2), u32::MAX)); + assert_ok!(Democracy::reap_preimage( + Origin::signed(5), + set_balance_proposal_hash(2), + u32::MAX + )); assert_eq!(Balances::reserved_balance(6), 0); assert_eq!(Balances::free_balance(6), 48); assert_eq!(Balances::free_balance(5), 62); @@ -132,13 +145,19 @@ fn noting_imminent_preimage_for_free_should_work() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_noop!( - if operational { Democracy::note_imminent_preimage_operational(Origin::signed(6), set_balance_proposal(2)) } - else { Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) }, + if operational { + Democracy::note_imminent_preimage_operational( + Origin::signed(6), + set_balance_proposal(2), + ) + } else { + Democracy::note_imminent_preimage(Origin::signed(6), set_balance_proposal(2)) + }, Error::<Test>::NotImminent ); @@
-161,7 +180,10 @@ fn reaping_imminent_preimage_should_fail() { assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); next_block(); next_block(); - assert_noop!(Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), Error::<Test>::Imminent); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), h, u32::MAX), + Error::<Test>::Imminent + ); }); } @@ -174,7 +196,7 @@ fn note_imminent_preimage_can_only_be_successful_once() { 2, set_balance_proposal_hash(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); next_block(); diff --git a/substrate/frame/democracy/src/tests/public_proposals.rs b/substrate/frame/democracy/src/tests/public_proposals.rs index 1d323d684d7f2ad395d142fa441bff7267a608e3..34713c3e15725b944161c7da4de39c64eaa107f3 100644 --- a/substrate/frame/democracy/src/tests/public_proposals.rs +++ b/substrate/frame/democracy/src/tests/public_proposals.rs @@ -89,10 +89,7 @@ fn poor_seconder_should_not_work() { fn invalid_seconds_upper_bound_should_not_work() { new_test_ext().execute_with(|| { assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_noop!( - Democracy::second(Origin::signed(2), 0, 0), - Error::<Test>::WrongUpperBound - ); + assert_noop!(Democracy::second(Origin::signed(2), 0, 0), Error::<Test>::WrongUpperBound); }); } diff --git a/substrate/frame/democracy/src/tests/scheduling.rs b/substrate/frame/democracy/src/tests/scheduling.rs index e178ff0fc1a25597666af2d0cf55d69530be13ad..06b492bc6093c228378db25e279d12e7638d62ed 100644 --- a/substrate/frame/democracy/src/tests/scheduling.rs +++ b/substrate/frame/democracy/src/tests/scheduling.rs @@ -26,7 +26,7 @@ fn simple_passing_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); @@ -43,7 +43,7 @@ fn simple_failing_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); @@ -62,13 +62,13 @@ fn ooo_inject_referendums_should_work() { 3, set_balance_proposal_hash_and_note(3), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); let r2 = Democracy::inject_referendum( 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); @@ -92,7 +92,7 @@ fn delayed_enactment_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 1 + 1, ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); diff --git a/substrate/frame/democracy/src/tests/voting.rs b/substrate/frame/democracy/src/tests/voting.rs index 13072ebf87b114a0026e2ed281b1d361aa35ab97..e035c2d46c1b66cb9d38585f0ac8b483072295f4 100644 --- a/substrate/frame/democracy/src/tests/voting.rs +++ b/substrate/frame/democracy/src/tests/voting.rs @@ -23,7 +23,10 @@ use super::*; fn overvoting_should_fail() { new_test_ext().execute_with(|| { let r = begin_referendum(); - assert_noop!(Democracy::vote(Origin::signed(1), r, aye(2)), Error::<Test>::InsufficientFunds); + assert_noop!( + Democracy::vote(Origin::signed(1), r, aye(2)), + Error::<Test>::InsufficientFunds + ); }); } @@ -102,7 +105,7 @@ fn controversial_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, );
assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); @@ -128,7 +131,7 @@ fn controversial_low_turnout_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); @@ -152,7 +155,7 @@ fn passing_low_turnout_voting_should_work() { 2, set_balance_proposal_hash_and_note(2), VoteThreshold::SuperMajorityApprove, - 0 + 0, ); assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); diff --git a/substrate/frame/democracy/src/types.rs b/substrate/frame/democracy/src/types.rs index 22341ba31ee03b5edb7c1aeffe3351f1e5cad565..4e643006e51678b97bedf599e644cefad881db2c 100644 --- a/substrate/frame/democracy/src/types.rs +++ b/substrate/frame/democracy/src/types.rs @@ -17,29 +17,31 @@ //! Miscellaneous additional datatypes. -use codec::{Encode, Decode}; -use sp_runtime::RuntimeDebug; -use sp_runtime::traits::{Zero, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, Saturating}; -use crate::{Vote, VoteThreshold, AccountVote, Conviction}; +use crate::{AccountVote, Conviction, Vote, VoteThreshold}; +use codec::{Decode, Encode}; +use sp_runtime::{ + traits::{Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero}, + RuntimeDebug, +}; /// Info regarding an ongoing referendum. #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Tally<Balance> { /// The number of aye votes, expressed in terms of post-conviction lock-vote. - pub (crate) ayes: Balance, + pub(crate) ayes: Balance, /// The number of nay votes, expressed in terms of post-conviction lock-vote. - pub (crate) nays: Balance, + pub(crate) nays: Balance, /// The amount of funds currently expressing its opinion. Pre-conviction. - pub (crate) turnout: Balance, + pub(crate) turnout: Balance, } /// Amount of votes and capital placed in delegation for an account. #[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Delegations<Balance> { /// The number of votes (this is post-conviction). - pub (crate) votes: Balance, + pub(crate) votes: Balance, /// The amount of raw capital, used for the turnout. - pub (crate) capital: Balance, + pub(crate) capital: Balance, } impl<Balance: Saturating> Saturating for Delegations<Balance> { @@ -65,22 +67,24 @@ impl<Balance: Saturating> Saturating for Delegations<Balance> { } fn saturating_pow(self, exp: usize) -> Self { - Self { - votes: self.votes.saturating_pow(exp), - capital: self.capital.saturating_pow(exp), - } + Self { votes: self.votes.saturating_pow(exp), capital: self.capital.saturating_pow(exp) } } } impl< - Balance: From<u8> + Zero + Copy + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Bounded + - Saturating -> Tally<Balance> { + Balance: From<u8> + + Zero + + Copy + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Bounded + + Saturating, + > Tally<Balance> +{ /// Create a new tally. - pub fn new( - vote: Vote, - balance: Balance, - ) -> Self { + pub fn new(vote: Vote, balance: Balance) -> Self { let Delegations { votes, capital } = vote.conviction.votes(balance); Self { ayes: if vote.aye { votes } else { Zero::zero() }, @@ -90,10 +94,7 @@ impl< } /// Add an account's vote into the tally.
- pub fn add( - &mut self, - vote: AccountVote<Balance>, - ) -> Option<()> { + pub fn add(&mut self, vote: AccountVote<Balance>) -> Option<()> { match vote { AccountVote::Standard { vote, balance } => { let Delegations { votes, capital } = vote.conviction.votes(balance); @@ -102,23 +103,20 @@ impl< true => self.ayes = self.ayes.checked_add(&votes)?, false => self.nays = self.nays.checked_add(&votes)?, } - } + }, AccountVote::Split { aye, nay } => { let aye = Conviction::None.votes(aye); let nay = Conviction::None.votes(nay); self.turnout = self.turnout.checked_add(&aye.capital)?.checked_add(&nay.capital)?; self.ayes = self.ayes.checked_add(&aye.votes)?; self.nays = self.nays.checked_add(&nay.votes)?; - } + }, } Some(()) } /// Remove an account's vote from the tally. - pub fn remove( - &mut self, - vote: AccountVote<Balance>, - ) -> Option<()> { + pub fn remove(&mut self, vote: AccountVote<Balance>) -> Option<()> { match vote { AccountVote::Standard { vote, balance } => { let Delegations { votes, capital } = vote.conviction.votes(balance); @@ -127,14 +125,14 @@ impl< true => self.ayes = self.ayes.checked_sub(&votes)?, false => self.nays = self.nays.checked_sub(&votes)?, } - } + }, AccountVote::Split { aye, nay } => { let aye = Conviction::None.votes(aye); let nay = Conviction::None.votes(nay); self.turnout = self.turnout.checked_sub(&aye.capital)?.checked_sub(&nay.capital)?; self.ayes = self.ayes.checked_sub(&aye.votes)?; self.nays = self.nays.checked_sub(&nay.votes)?; - } + }, } Some(()) } @@ -164,15 +162,15 @@ impl< #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct ReferendumStatus<BlockNumber, Hash, Balance> { /// When voting on this referendum will end. - pub (crate) end: BlockNumber, + pub(crate) end: BlockNumber, /// The hash of the proposal being voted on. - pub (crate) proposal_hash: Hash, + pub(crate) proposal_hash: Hash, /// The thresholding mechanism to determine whether it passed. - pub (crate) threshold: VoteThreshold, + pub(crate) threshold: VoteThreshold, /// The delay (in blocks) to wait after a successful referendum before deploying. - pub (crate) delay: BlockNumber, + pub(crate) delay: BlockNumber, /// The current tally of votes in this referendum. - pub (crate) tally: Tally<Balance>, + pub(crate) tally: Tally<Balance>, } /// Info regarding a referendum, present or past. @@ -181,7 +179,7 @@ pub enum ReferendumInfo<BlockNumber, Hash, Balance> { /// Referendum is happening, the arg is the block number at which it will end. Ongoing(ReferendumStatus<BlockNumber, Hash, Balance>), /// Referendum finished at `end`, and has been `approved` or rejected. - Finished{approved: bool, end: BlockNumber}, + Finished { approved: bool, end: BlockNumber }, } impl<BlockNumber, Hash, Balance: Default> ReferendumInfo<BlockNumber, Hash, Balance> { @@ -192,7 +190,7 @@ impl<BlockNumber, Hash, Balance: Default> ReferendumInfo<BlockNumber, Hash, Balance> ) -> Self { - let s = ReferendumStatus{ end, proposal_hash, threshold, delay, tally: Tally::default() }; + let s = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Tally::default() }; ReferendumInfo::Ongoing(s) } } diff --git a/substrate/frame/democracy/src/vote.rs b/substrate/frame/democracy/src/vote.rs index 5adc76f4ae00b28ac9a9c147412102675d0f2abf..7b1b32ea37f5825ef742b60c2f78c1b9a5908309 100644 --- a/substrate/frame/democracy/src/vote.rs +++ b/substrate/frame/democracy/src/vote.rs @@ -17,10 +17,13 @@ //! The vote datatype.
-use sp_std::{prelude::*, result::Result, convert::TryFrom}; -use codec::{Encode, EncodeLike, Decode, Output, Input}; -use sp_runtime::{RuntimeDebug, traits::{Saturating, Zero}}; -use crate::{Conviction, ReferendumIndex, Delegations}; +use crate::{Conviction, Delegations, ReferendumIndex}; +use codec::{Decode, Encode, EncodeLike, Input, Output}; +use sp_runtime::{ + traits::{Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, prelude::*, result::Result}; /// A number of lock periods, plus a vote, one way or the other. #[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] @@ -136,7 +139,9 @@ pub enum Voting<Balance, AccountId, BlockNumber> { }, } -impl<Balance: Default, AccountId, BlockNumber: Zero> Default for Voting<Balance, AccountId, BlockNumber> { +impl<Balance: Default, AccountId, BlockNumber: Zero> Default + for Voting<Balance, AccountId, BlockNumber> +{ fn default() -> Self { Voting::Direct { votes: Vec::new(), @@ -146,31 +151,30 @@ impl<Balance: Default, AccountId, BlockNumber: Zero> Default for Voting<Balance -impl<Balance: Saturating + Ord + Zero + Copy, BlockNumber: Ord + Copy + Zero, AccountId> Voting<Balance, AccountId, BlockNumber> { +impl<Balance: Saturating + Ord + Zero + Copy, BlockNumber: Ord + Copy + Zero, AccountId> + Voting<Balance, AccountId, BlockNumber> +{ pub fn rejig(&mut self, now: BlockNumber) { match self { Voting::Direct { prior, .. } => prior, Voting::Delegating { prior, .. } => prior, - }.rejig(now); + } + .rejig(now); } /// The amount of this account's balance that must currently be locked due to voting. pub fn locked_balance(&self) -> Balance { match self { - Voting::Direct { votes, prior, .. } => votes.iter() - .map(|i| i.1.balance()) - .fold(prior.locked(), |a, i| a.max(i)), + Voting::Direct { votes, prior, .. } => + votes.iter().map(|i| i.1.balance()).fold(prior.locked(), |a, i| a.max(i)), Voting::Delegating { balance, .. } => *balance, } } - pub fn set_common(&mut self, + pub fn set_common( + &mut self, delegations: Delegations<Balance>, - prior: PriorLock<BlockNumber, Balance> + prior: PriorLock<BlockNumber, Balance>, ) { let (d, p) = match self { Voting::Direct { ref mut delegations, ref mut prior, .. } => (delegations, prior), diff --git a/substrate/frame/democracy/src/vote_threshold.rs b/substrate/frame/democracy/src/vote_threshold.rs index 3114b22499d0e855a85f39bd0bc1da1154b428bc..feaa596921c41142a80adb4a5c4376c510075a99 100644 --- a/substrate/frame/democracy/src/vote_threshold.rs +++ b/substrate/frame/democracy/src/vote_threshold.rs @@ -17,12 +17,12 @@ //! Voting thresholds. -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Zero, IntegerSquareRoot}; -use sp_std::ops::{Add, Mul, Div, Rem}; use crate::Tally; +use codec::{Decode, Encode}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::{IntegerSquareRoot, Zero}; +use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] @@ -43,25 +43,32 @@ pub trait Approved<Balance> { } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. -fn compare_rationals<T: Zero + Mul<T, Output = T> + Div<T, Output = T> + Rem<T, Output = T> + Ord + Copy>(mut n1: T, mut d1: T, mut n2: T, mut d2: T) -> bool { +fn compare_rationals< + T: Zero + Mul<T, Output = T> + Div<T, Output = T> + Rem<T, Output = T> + Ord + Copy, +>( + mut n1: T, + mut d1: T, + mut n2: T, + mut d2: T, +) -> bool { // Uses a continued fractional representation for a non-overflowing compare. // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/.
loop { let q1 = n1 / d1; let q2 = n2 / d2; if q1 < q2 { - return true; + return true } if q2 < q1 { - return false; + return false } let r1 = n1 % d1; let r2 = n2 % d2; if r2.is_zero() { - return false; + return false } if r1.is_zero() { - return true; + return true } n1 = d2; n2 = d1; @@ -71,14 +78,22 @@ fn compare_rationals + Div + Rem - + Mul<Balance, Output = Balance> + Div<Balance, Output = Balance> - + Rem<Balance, Output = Balance> + Copy, -> Approved<Balance> for VoteThreshold { + Balance: IntegerSquareRoot + + Zero + + Ord + + Add<Balance, Output = Balance> + + Mul<Balance, Output = Balance> + + Div<Balance, Output = Balance> + + Rem<Balance, Output = Balance> + + Copy, + > Approved<Balance> for VoteThreshold +{ fn approved(&self, tally: Tally<Balance>, electorate: Balance) -> bool { let sqrt_voters = tally.turnout.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); - if sqrt_voters.is_zero() { return false; } + if sqrt_voters.is_zero() { + return false + } match *self { VoteThreshold::SuperMajorityApprove => compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), @@ -95,7 +110,9 @@ mod tests { #[test] fn should_work() { - assert!(!VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 60, nays: 50, turnout: 110}, 210)); - assert!(VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 100, nays: 50, turnout: 150}, 210)); + assert!(!VoteThreshold::SuperMajorityApprove + .approved(Tally { ayes: 60, nays: 50, turnout: 110 }, 210)); + assert!(VoteThreshold::SuperMajorityApprove + .approved(Tally { ayes: 100, nays: 50, turnout: 150 }, 210)); } } diff --git a/substrate/frame/democracy/src/weights.rs b/substrate/frame/democracy/src/weights.rs index 1462e65c409b1cea4240a2a9128e2abfdce1d7bf..6572e62889c1ceb84e49c88a67c63f4aa24ceec5 100644 --- a/substrate/frame/democracy/src/weights.rs +++ b/substrate/frame/democracy/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs index 6cf581135f144a3925245add0881df72ef465e57..5e89db7537d0709958fb05901d1b32e77f0fbcc0 100644 --- a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs @@ -18,7 +18,7 @@ //! Two phase election pallet benchmarking. use super::*; -use crate::{Pallet as MultiPhase, unsigned::IndexAssignmentOf}; +use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; use frame_benchmarking::{account, impl_benchmark_test_suite}; use frame_support::{assert_ok, traits::Hooks}; use frame_system::RawOrigin; @@ -53,8 +53,9 @@ fn solution_with_size<T: Config>( let stake: VoteWeight = ed.max(One::one()).saturating_mul(100); // first generates random targets. - let targets: Vec<T::AccountId> = - (0..size.targets).map(|i| frame_benchmarking::account("Targets", i, SEED)).collect(); + let targets: Vec<T::AccountId> = (0..size.targets) + .map(|i| frame_benchmarking::account("Targets", i, SEED)) + .collect(); let mut rng = SmallRng::seed_from_u64(SEED.into()); @@ -80,8 +81,11 @@ fn solution_with_size<T: Config>( .collect::<Vec<_>>(); // rest of the voters. They can only vote for non-winners. - let non_winners = - targets.iter().filter(|t| !winners.contains(t)).cloned().collect::<Vec<_>>(); + let non_winners = targets + .iter() + .filter(|t| !winners.contains(t)) + .cloned() + .collect::<Vec<_>>(); let rest_voters = (active_voters_count..size.voters) .map(|i| { let votes = (&non_winners) @@ -147,14 +151,22 @@ fn set_up_data_provider<T: Config>(v: u32, t: u32) { // number of votes in snapshot.
T::DataProvider::clear(); - log!(info, "setting up with voters = {} [degree = {}], targets = {}", v, T::DataProvider::MAXIMUM_VOTES_PER_VOTER, t); + log!( + info, + "setting up with voters = {} [degree = {}], targets = {}", + v, + T::DataProvider::MAXIMUM_VOTES_PER_VOTER, + t + ); // fill targets. - let mut targets = (0..t).map(|i| { - let target = frame_benchmarking::account::<T::AccountId>("Target", i, SEED); - T::DataProvider::add_target(target.clone()); - target - }).collect::<Vec<_>>(); + let mut targets = (0..t) + .map(|i| { + let target = frame_benchmarking::account::<T::AccountId>("Target", i, SEED); + T::DataProvider::add_target(target.clone()); + target + }) + .collect::<Vec<_>>(); // we should always have enough voters to fill. assert!(targets.len() > T::DataProvider::MAXIMUM_VOTES_PER_VOTER as usize); targets.truncate(T::DataProvider::MAXIMUM_VOTES_PER_VOTER as usize); diff --git a/substrate/frame/election-provider-multi-phase/src/helpers.rs b/substrate/frame/election-provider-multi-phase/src/helpers.rs index 46eeef0a6bf73800e6db878d48f295a59483d108..0abf448a4567befd8f5c79ce6ce84921144c354a 100644 --- a/substrate/frame/election-provider-multi-phase/src/helpers.rs +++ b/substrate/frame/election-provider-multi-phase/src/helpers.rs @@ -17,7 +17,7 @@ //! Some helper functions/macros for this crate. -use super::{Config, VoteWeight, CompactVoterIndexOf, CompactTargetIndexOf}; +use super::{CompactTargetIndexOf, CompactVoterIndexOf, Config, VoteWeight}; use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; #[macro_export] @@ -58,7 +58,9 @@ pub fn voter_index_fn<T: Config>( cache: &BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> Option<CompactVoterIndexOf<T>> + '_ { move |who| { - cache.get(who).and_then(|i| <usize as TryInto<CompactVoterIndexOf<T>>>::try_into(*i).ok()) + cache + .get(who) + .and_then(|i| <usize as TryInto<CompactVoterIndexOf<T>>>::try_into(*i).ok()) } } @@ -70,7 +72,9 @@ pub fn voter_index_fn_owned<T: Config>( cache: BTreeMap<T::AccountId, usize>, ) -> impl Fn(&T::AccountId) -> Option<CompactVoterIndexOf<T>> { move |who| { - cache.get(who).and_then(|i| <usize as TryInto<CompactVoterIndexOf<T>>>::try_into(*i).ok()) + cache + .get(who) + .and_then(|i| <usize as TryInto<CompactVoterIndexOf<T>>>::try_into(*i).ok()) } } @@ -173,7 +177,11 @@ pub fn stake_of_fn_linear<T: Config>( snapshot: &Vec<(T::AccountId, VoteWeight, Vec<T::AccountId>)>, ) -> impl Fn(&T::AccountId) -> VoteWeight + '_ { move |who| { - snapshot.iter().find(|(x, _, _)| x == who).map(|(_, x, _)| *x).unwrap_or_default() + snapshot + .iter() + .find(|(x, _, _)| x == who) + .map(|(_, x, _)| *x) + .unwrap_or_default() } } diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index e552335d02530d7981b18dee2361879a7e52d6a0..905492d6ca04c44f2fbdc7f342e86f5afca45364 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -48,7 +48,7 @@ //! //! ### Signed Phase //! -//! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A +//! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A //! deposit is reserved, based on the size of the solution, for the cost of keeping this solution //! on-chain for a number of blocks, and the potential weight of the solution upon being checked. A //! maximum of `pallet::Config::MaxSignedSubmissions` solutions are stored.
The queue is always @@ -228,34 +228,31 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode}; +use frame_election_provider_support::{onchain, ElectionDataProvider, ElectionProvider}; use frame_support::{ dispatch::DispatchResultWithPostInfo, ensure, - traits::{Currency, Get, ReservableCurrency, OnUnbalanced}, + traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, weights::Weight, }; use frame_system::{ensure_none, offchain::SendTransactionTypes}; -use frame_election_provider_support::{ElectionDataProvider, ElectionProvider, onchain}; +use sp_arithmetic::{ + traits::{CheckedAdd, Zero}, + UpperOf, +}; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, CompactSolution, ElectionScore, - EvaluateSupport, PerThing128, Supports, VoteWeight, + assignment_ratio_to_staked_normalized, CompactSolution, ElectionScore, EvaluateSupport, + PerThing128, Supports, VoteWeight, }; use sp_runtime::{ + traits::Bounded, transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, TransactionValidityError, ValidTransaction, }, DispatchError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, - traits::Bounded, -}; -use sp_std::{ - convert::TryInto, - prelude::*, -}; -use sp_arithmetic::{ - UpperOf, - traits::{Zero, CheckedAdd}, }; +use sp_std::{convert::TryInto, prelude::*}; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; @@ -562,7 +559,9 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + SendTransactionTypes> { - type Event: From> + IsType<::Event> + TryInto>; + type Event: From> + + IsType<::Event> + + TryInto>; /// Currency type. type Currency: ReservableCurrency + Currency; @@ -701,21 +700,22 @@ pub mod pallet { Ok(snap_weight) => { log!(info, "Starting signed phase round {}.", Self::round()); T::WeightInfo::on_initialize_open_signed().saturating_add(snap_weight) - } + }, Err(why) => { // Not much we can do about this at this point. log!(warn, "failed to open signed phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight // in case of error. - } + }, } - } + }, Phase::Signed | Phase::Off if remaining <= unsigned_deadline && remaining > Zero::zero() => { // our needs vary according to whether or not the unsigned phase follows a signed phase - let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed { + let (need_snapshot, enabled, signed_weight) = if current_phase == Phase::Signed + { // there was previously a signed phase: close the signed phase, no need for snapshot. // // Notes: @@ -744,14 +744,14 @@ pub mod pallet { }; base_weight.saturating_add(snap_weight).saturating_add(signed_weight) - } + }, Err(why) => { // Not much we can do about this at this point. log!(warn, "failed to open unsigned phase due to {:?}", why); T::WeightInfo::on_initialize_nothing() // NOTE: ^^ The trait specifies that this is a noop in terms of weight // in case of error. - } + }, } } _ => T::WeightInfo::on_initialize_nothing(), @@ -759,15 +759,16 @@ pub mod pallet { } fn offchain_worker(now: T::BlockNumber) { - use sp_runtime::offchain::storage_lock::{StorageLock, BlockAndTime}; + use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; // Create a lock with the maximum deadline of number of blocks in the unsigned phase. // This should only become useful in an **abrupt** termination of execution, otherwise the // guard will be dropped upon successful execution.
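The `on_initialize` hunks above implement the pallet's countdown: with the next election `remaining` blocks away, the signed phase should be open once `remaining <= SignedPhase + UnsignedPhase`, and the unsigned phase once `remaining <= UnsignedPhase`. A minimal sketch of that deadline arithmetic (the standalone-function shape and all names here are illustrative, not the pallet's API):

#[derive(Debug, PartialEq)]
enum ExpectedPhase {
    Off,
    Signed,
    Unsigned,
}

/// Given the block distance to the next election and the configured phase
/// lengths, decide which phase should currently be open. Mirrors the
/// `remaining <= deadline && remaining > 0` checks in `on_initialize`.
fn expected_phase(remaining: u32, signed_phase: u32, unsigned_phase: u32) -> ExpectedPhase {
    let unsigned_deadline = unsigned_phase;
    let signed_deadline = unsigned_deadline + signed_phase;
    if remaining > 0 && remaining <= unsigned_deadline {
        ExpectedPhase::Unsigned
    } else if remaining > 0 && remaining <= signed_deadline {
        ExpectedPhase::Signed
    } else {
        ExpectedPhase::Off
    }
}

#[test]
fn phases_open_back_to_back() {
    // 10 signed blocks followed by 5 unsigned blocks before the election.
    assert_eq!(expected_phase(20, 10, 5), ExpectedPhase::Off);
    assert_eq!(expected_phase(15, 10, 5), ExpectedPhase::Signed);
    assert_eq!(expected_phase(5, 10, 5), ExpectedPhase::Unsigned);
}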
- let mut lock = StorageLock::>>::with_block_deadline( - unsigned::OFFCHAIN_LOCK, - T::UnsignedPhase::get().saturated_into(), - ); + let mut lock = + StorageLock::>>::with_block_deadline( + unsigned::OFFCHAIN_LOCK, + T::UnsignedPhase::get().saturated_into(), + ); match lock.try_lock() { Ok(_guard) => { @@ -775,7 +776,7 @@ pub mod pallet { }, Err(deadline) => { log!(debug, "offchain worker lock not released, deadline is {:?}", deadline); - } + }, }; } @@ -857,8 +858,7 @@ pub mod pallet { witness: SolutionOrSnapshotSize, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; - let error_message = - "Invalid unsigned submission must produce invalid block and \ + let error_message = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward."; // Check score being an improvement, phase, and desired targets. @@ -921,11 +921,8 @@ pub mod pallet { // Note: we don't `rotate_round` at this point; the next call to // `ElectionProvider::elect` will succeed and take care of that. - let solution = ReadySolution { - supports, - score: [0, 0, 0], - compute: ElectionCompute::Emergency, - }; + let solution = + ReadySolution { supports, score: [0, 0, 0], compute: ElectionCompute::Emergency }; >::put(solution); Ok(()) @@ -954,7 +951,8 @@ pub mod pallet { // ensure witness data is correct. ensure!( - num_signed_submissions >= >::decode_len().unwrap_or_default() as u32, + num_signed_submissions >= + >::decode_len().unwrap_or_default() as u32, Error::::SignedInvalidWitness, ); @@ -989,8 +987,7 @@ pub mod pallet { }; // collect deposit. Thereafter, the function cannot fail. - T::Currency::reserve(&who, deposit) - .map_err(|_| Error::::SignedCannotPayDeposit)?; + T::Currency::reserve(&who, deposit).map_err(|_| Error::::SignedCannotPayDeposit)?; let ejected_a_solution = maybe_removed.is_some(); // if we had to remove the weakest solution, unreserve its deposit @@ -1068,10 +1065,8 @@ pub mod pallet { if let Call::submit_unsigned(solution, _) = call { // Discard solution not coming from the local OCW. match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } - _ => { - return InvalidTransaction::Call.into(); - } + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, + _ => return InvalidTransaction::Call.into(), } let _ = Self::unsigned_pre_dispatch_checks(solution) @@ -1084,9 +1079,8 @@ pub mod pallet { ValidTransaction::with_tag_prefix("OffchainElection") // The higher the score[0], the better a solution is. .priority( - T::MinerTxPriority::get().saturating_add( - solution.score[0].saturated_into() - ), + T::MinerTxPriority::get() + .saturating_add(solution.score[0].saturated_into()), ) // Used to deduplicate unsigned solutions: each validator should produce one // solution per round at most, and solutions are not propagated. @@ -1219,20 +1213,18 @@ impl Pallet { match current_phase { Phase::Unsigned((true, opened)) if opened == now => { // Mine a new solution, cache it, and attempt to submit it - let initial_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { - Self::mine_check_save_submit() - }); + let initial_output = Self::ensure_offchain_repeat_frequency(now) + .and_then(|_| Self::mine_check_save_submit()); log!(debug, "initial offchain thread output: {:?}", initial_output); - } + }, Phase::Unsigned((true, opened)) if opened < now => { // Try and resubmit the cached solution, and recompute ONLY if it is not // feasible.
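The `.priority(...)` reformatting above preserves an important detail: an unsigned solution's transaction priority is the configured base plus `score[0]`, combined with `saturating_add`. A toy version showing why the saturation matters (all values invented):

/// Priority of an unsigned solution: a configured base plus the solution's
/// primary score component. Saturates at u64::MAX instead of wrapping, so a
/// huge score cannot overflow into a tiny priority.
fn unsigned_priority(base: u64, score_0: u64) -> u64 {
    base.saturating_add(score_0)
}

#[test]
fn better_scores_get_higher_priority() {
    assert!(unsigned_priority(100, 5_000) > unsigned_priority(100, 4_000));
    // Near the top of the range we saturate instead of wrapping to a low value.
    assert_eq!(unsigned_priority(u64::MAX, 1), u64::MAX);
}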
- let resubmit_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { - Self::restore_or_compute_then_maybe_submit() - }); + let resubmit_output = Self::ensure_offchain_repeat_frequency(now) + .and_then(|_| Self::restore_or_compute_then_maybe_submit()); log!(debug, "resubmit offchain thread output: {:?}", resubmit_output); - } - _ => {} + }, + _ => {}, } // After election finalization, clear OCW solution storage. @@ -1242,9 +1234,7 @@ impl Pallet { let local_event = ::Event::from(event_record.event); local_event.try_into().ok() }) - .any(|event| { - matches!(event, Event::ElectionFinalized(_)) - }) + .any(|event| matches!(event, Event::ElectionFinalized(_))) { unsigned::kill_ocw_solution::(); } @@ -1308,14 +1298,12 @@ impl Pallet { // Defensive-only. if targets.len() > target_limit || voters.len() > voter_limit { debug_assert!(false, "Snapshot limit has not been respected."); - return Err(ElectionError::DataProvider("Snapshot too big for submission.")); + return Err(ElectionError::DataProvider("Snapshot too big for submission.")) } // Only write snapshot if all existed. - let metadata = SolutionOrSnapshotSize { - voters: voters.len() as u32, - targets: targets.len() as u32, - }; + let metadata = + SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 }; log!(debug, "creating a snapshot with metadata {:?}", metadata); >::put(metadata); @@ -1335,7 +1323,10 @@ impl Pallet { debug_assert!(buffer.len() == size && size == buffer.capacity()); sp_io::storage::set(&>::hashed_key(), &buffer); - Ok(w1.saturating_add(w2).saturating_add(w3).saturating_add(T::DbWeight::get().writes(3))) + Ok(w1 + .saturating_add(w2) + .saturating_add(w3) + .saturating_add(T::DbWeight::get().writes(3))) } /// Kill everything created by [`Pallet::create_snapshot`]. @@ -1369,9 +1360,9 @@ impl Pallet { // Ensure that the solution's score can pass absolute min-score. let submitted_score = solution.score.clone(); ensure!( - Self::minimum_untrusted_score().map_or(true, |min_score| + Self::minimum_untrusted_score().map_or(true, |min_score| { sp_npos_elections::is_score_better(submitted_score, min_score, Perbill::zero()) - ), + }), FeasibilityError::UntrustedScoreTooLow ); @@ -1418,7 +1409,7 @@ impl Pallet { // Check that all of the targets are valid based on the snapshot. if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { - return Err(FeasibilityError::InvalidVote); + return Err(FeasibilityError::InvalidVote) } Ok(()) }) @@ -1494,8 +1485,13 @@ impl Pallet { .fold(Zero::zero(), |acc, next| acc + next.voters.len() as u32); Ok(( supports, - T::WeightInfo::elect_queued(metadata.voters, metadata.targets, active_voters, desired), - compute + T::WeightInfo::elect_queued( + metadata.voters, + metadata.targets, + active_voters, + desired, + ), + compute, )) }, ) @@ -1526,12 +1522,12 @@ impl ElectionProvider for Pallet { // All went okay, put sign to be Off, clean snapshot, etc. Self::rotate_round(); Ok((supports, weight)) - } + }, Err(why) => { log!(error, "Entering emergency mode: {:?}", why); >::put(Phase::Emergency); Err(why) - } + }, } } } @@ -1553,11 +1549,9 @@ mod feasibility_check { //! that is invalid, but gets through the system as valid. 
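Among the hunks above, the `minimum_untrusted_score` gate is easy to miss: a submitted score is accepted only if it beats the configured floor, with a zero epsilon. A sketch of that check, assuming the `sp_npos_elections` and `sp_runtime` APIs already imported in this diff (`passes_untrusted_floor` itself is a made-up helper name):

use sp_npos_elections::{is_score_better, ElectionScore};
use sp_runtime::Perbill;

/// Mirrors the `UntrustedScoreTooLow` check in `feasibility_check`: if a
/// minimum score is configured, the submission must beat it; `Perbill::zero()`
/// means no extra improvement margin is demanded.
fn passes_untrusted_floor(submitted: ElectionScore, min_score: Option<ElectionScore>) -> bool {
    min_score.map_or(true, |min| is_score_better(submitted, min, Perbill::zero()))
}

The three score components are [minimal winner backing, total backing, sum of squared backings]; `is_score_better` treats the first two as higher-is-better and the last as lower-is-better.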
use super::*; - use crate::{ - mock::{ - MultiPhase, Runtime, roll_to, TargetIndex, raw_solution, EpochLength, UnsignedPhase, - SignedPhase, VoterIndex, ExtBuilder, - }, + use crate::mock::{ + raw_solution, roll_to, EpochLength, ExtBuilder, MultiPhase, Runtime, SignedPhase, + TargetIndex, UnsignedPhase, VoterIndex, }; use frame_support::assert_noop; @@ -1728,11 +1722,11 @@ mod feasibility_check { mod tests { use super::*; use crate::{ - Phase, mock::{ - ExtBuilder, MultiPhase, Runtime, roll_to, MockWeightInfo, AccountId, TargetIndex, - Targets, multi_phase_events, System, SignedMaxSubmissions, + multi_phase_events, roll_to, AccountId, ExtBuilder, MockWeightInfo, MultiPhase, + Runtime, SignedMaxSubmissions, System, TargetIndex, Targets, }, + Phase, }; use frame_election_provider_support::ElectionProvider; use frame_support::{assert_noop, assert_ok}; @@ -2002,7 +1996,6 @@ mod tests { roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); - let (solution, _) = MultiPhase::mine_solution(2).unwrap(); // Default solution has a score of [50, 100, 5000]. assert_eq!(solution.score, [50, 100, 5000]); @@ -2012,10 +2005,7 @@ mod tests { >::put([51, 0, 0]); assert_noop!( - MultiPhase::feasibility_check( - solution, - ElectionCompute::Signed - ), + MultiPhase::feasibility_check(solution, ElectionCompute::Signed), FeasibilityError::UntrustedScoreTooLow, ); }) @@ -2039,9 +2029,9 @@ mod tests { }; let mut active = 1; - while weight_with(active) - <= ::BlockWeights::get().max_block - || active == all_voters + while weight_with(active) <= + ::BlockWeights::get().max_block || + active == all_voters { active += 1; } diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index 55fa58590ce714819b47c99c07dd749cb4ddd2cc..c5007733c1e339b333fe7a17a7cedc74e1c2fb1d 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -17,13 +17,10 @@ use super::*; use crate as multi_phase; -use multi_phase::unsigned::{IndexAssignmentOf, Voter}; +use frame_election_provider_support::{data_provider, ElectionDataProvider}; pub use frame_support::{assert_noop, assert_ok}; -use frame_support::{ - parameter_types, - traits::{Hooks}, - weights::Weight, -}; +use frame_support::{parameter_types, traits::Hooks, weights::Weight}; +use multi_phase::unsigned::{IndexAssignmentOf, Voter}; use parking_lot::RwLock; use sp_core::{ offchain::{ @@ -32,7 +29,6 @@ use sp_core::{ }, H256, }; -use frame_election_provider_support::{ElectionDataProvider, data_provider}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, to_without_backing, CompactSolution, ElectionResult, EvaluateSupport, @@ -405,7 +401,7 @@ impl ElectionDataProvider for StakingMock { let targets = Targets::get(); if maybe_max_len.map_or(false, |max_len| targets.len() > max_len) { - return Err("Targets too big"); + return Err("Targets too big") } Ok((targets, 0)) @@ -416,7 +412,7 @@ impl ElectionDataProvider for StakingMock { ) -> data_provider::Result<(Vec<(AccountId, VoteWeight, Vec)>, Weight)> { let voters = Voters::get(); if maybe_max_len.map_or(false, |max_len| voters.len() > max_len) { - return Err("Voters too big"); + return Err("Voters too big") } Ok((voters, 0)) diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index 
1aaf96b8add94cb3d3f269c52d4369ed7dd242fc..c91c923d93e902a871a09d33d870123ff4d47f26 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -18,11 +18,11 @@ //! The signed phase implementation. use crate::{ - CompactOf, Config, ElectionCompute, Pallet, RawSolution, ReadySolution, SolutionOrSnapshotSize, - Weight, WeightInfo, QueuedSolution, SignedSubmissionsMap, SignedSubmissionIndices, - SignedSubmissionNextIndex, + CompactOf, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, ReadySolution, + SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, + SolutionOrSnapshotSize, Weight, WeightInfo, }; -use codec::{Encode, Decode, HasCompact}; +use codec::{Decode, Encode, HasCompact}; use frame_support::{ storage::bounded_btree_map::BoundedBTreeMap, traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, @@ -31,8 +31,8 @@ use frame_support::{ use sp_arithmetic::traits::SaturatedConversion; use sp_npos_elections::{is_score_better, CompactSolution, ElectionScore}; use sp_runtime::{ - RuntimeDebug, traits::{Saturating, Zero}, + RuntimeDebug, }; use sp_std::{ cmp::Ordering, @@ -131,24 +131,30 @@ impl SignedSubmissions { deletion_overlay: BTreeSet::new(), }; // validate that the stored state is sane - debug_assert!(submissions.indices.values().copied().max().map_or( - true, - |max_idx| submissions.next_idx > max_idx, - )); + debug_assert!(submissions + .indices + .values() + .copied() + .max() + .map_or(true, |max_idx| submissions.next_idx > max_idx,)); submissions } /// Put the signed submissions back into storage. pub fn put(mut self) { // validate that we're going to write only sane things to storage - debug_assert!(self.insertion_overlay.keys().copied().max().map_or( - true, - |max_idx| self.next_idx > max_idx, - )); - debug_assert!(self.indices.values().copied().max().map_or( - true, - |max_idx| self.next_idx > max_idx, - )); + debug_assert!(self + .insertion_overlay + .keys() + .copied() + .max() + .map_or(true, |max_idx| self.next_idx > max_idx,)); + debug_assert!(self + .indices + .values() + .copied() + .max() + .map_or(true, |max_idx| self.next_idx > max_idx,)); SignedSubmissionIndices::::put(self.indices); SignedSubmissionNextIndex::::put(self.next_idx); @@ -203,10 +209,12 @@ impl SignedSubmissions { } self.insertion_overlay.remove(&remove_idx).or_else(|| { - (!self.deletion_overlay.contains(&remove_idx)).then(|| { - self.deletion_overlay.insert(remove_idx); - SignedSubmissionsMap::::try_get(remove_idx).ok() - }).flatten() + (!self.deletion_overlay.contains(&remove_idx)) + .then(|| { + self.deletion_overlay.insert(remove_idx); + SignedSubmissionsMap::::try_get(remove_idx).ok() + }) + .flatten() }) } @@ -256,10 +264,7 @@ impl SignedSubmissions { /// /// In the event that the new submission is not better than the current weakest according /// to `is_score_better`, we do not change anything. 
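The `SignedSubmissions` machinery above (and its `insert` hunks just below) amounts to a capacity-bounded, score-ordered queue: when full, a newcomer enters only by ejecting a strictly weaker weakest member. A much-simplified model using a single `u128` score instead of the three-component `ElectionScore`, and without the real type's storage indices and insertion/deletion overlays:

use std::collections::BTreeMap;

/// Keep at most `cap` submissions, ordered by score. When full, a new
/// submission only enters if it beats the current weakest, which is ejected.
/// Ties simply overwrite in this toy model; the real set handles them
/// separately.
struct BoundedSubmissions<V> {
    cap: usize,
    by_score: BTreeMap<u128, V>,
}

impl<V> BoundedSubmissions<V> {
    fn new(cap: usize) -> Self {
        Self { cap, by_score: BTreeMap::new() }
    }

    /// Returns the ejected weakest entry, if any; `None` also covers the
    /// "not inserted because too weak" case in this sketch.
    fn insert(&mut self, score: u128, value: V) -> Option<V> {
        if self.by_score.len() < self.cap {
            self.by_score.insert(score, value);
            return None
        }
        // Full: only displace the weakest if the newcomer is strictly better.
        let weakest = *self.by_score.keys().next().expect("cap > 0 and set is full");
        if score <= weakest {
            return None // not inserted
        }
        let ejected = self.by_score.remove(&weakest);
        self.by_score.insert(score, value);
        ejected
    }
}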
- pub fn insert( - &mut self, - submission: SignedSubmissionOf, - ) -> InsertResult { + pub fn insert(&mut self, submission: SignedSubmissionOf) -> InsertResult { // verify the expectation that we never reuse an index debug_assert!(!self.indices.values().any(|&idx| idx == self.next_idx)); @@ -271,12 +276,12 @@ impl SignedSubmissions { self.indices .try_insert(submission.solution.score, prev_idx) .expect("didn't change the map size; qed"); - return InsertResult::NotInserted; - } + return InsertResult::NotInserted + }, Ok(None) => { // successfully inserted into the set; no need to take out weakest member None - } + }, Err((insert_score, insert_idx)) => { // could not insert into the set because it is full. // note that we short-circuit return here in case the iteration produces `None`. @@ -290,11 +295,11 @@ impl SignedSubmissions { // if we haven't improved on the weakest score, don't change anything. if !is_score_better(insert_score, weakest_score, threshold) { - return InsertResult::NotInserted; + return InsertResult::NotInserted } self.swap_out_submission(weakest_score, Some((insert_score, insert_idx))) - } + }, }; // we've taken out the weakest, so update the storage map and the next index @@ -349,17 +354,12 @@ impl Pallet { let reward = T::SignedRewardBase::get(); while let Some(best) = all_submissions.pop_last() { - let SignedSubmission { solution, who, deposit} = best; + let SignedSubmission { solution, who, deposit } = best; let active_voters = solution.compact.voter_count() as u32; let feasibility_weight = { // defensive only: at the end of signed phase, snapshot will exist. let desired_targets = Self::desired_targets().unwrap_or_default(); - T::WeightInfo::feasibility_check( - voters, - targets, - active_voters, - desired_targets, - ) + T::WeightInfo::feasibility_check(voters, targets, active_voters, desired_targets) }; // the feasibility check itself has some weight weight = weight.saturating_add(feasibility_weight); @@ -375,13 +375,13 @@ impl Pallet { weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_accept_solution()); - break; - } + break + }, Err(_) => { Self::finalize_signed_phase_reject_solution(&who, deposit); weight = weight .saturating_add(T::WeightInfo::finalize_signed_phase_reject_solution()); - } + }, } } @@ -398,7 +398,12 @@ impl Pallet { debug_assert!(!SignedSubmissionNextIndex::::exists()); debug_assert!(SignedSubmissionsMap::::iter().next().is_none()); - log!(debug, "closed signed phase, found solution? {}, discarded {}", found_solution, discarded); + log!( + debug, + "closed signed phase, found solution? 
{}, discarded {}", + found_solution, + discarded + ); (found_solution, weight) } @@ -469,9 +474,12 @@ impl Pallet { let feasibility_weight = Self::feasibility_weight_of(solution, size); let len_deposit = T::SignedDepositByte::get().saturating_mul(encoded_len); - let weight_deposit = T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); + let weight_deposit = + T::SignedDepositWeight::get().saturating_mul(feasibility_weight.saturated_into()); - T::SignedDepositBase::get().saturating_add(len_deposit).saturating_add(weight_deposit) + T::SignedDepositBase::get() + .saturating_add(len_deposit) + .saturating_add(weight_deposit) } } @@ -479,13 +487,13 @@ impl Pallet { mod tests { use super::*; use crate::{ - Phase, Error, mock::{ - balances, ExtBuilder, MultiPhase, Origin, raw_solution, roll_to, Runtime, + balances, raw_solution, roll_to, ExtBuilder, MultiPhase, Origin, Runtime, SignedMaxSubmissions, SignedMaxWeight, }, + Error, Phase, }; - use frame_support::{dispatch::DispatchResult, assert_noop, assert_storage_noop, assert_ok}; + use frame_support::{assert_noop, assert_ok, assert_storage_noop, dispatch::DispatchResult}; fn submit_with_witness( origin: Origin, @@ -626,7 +634,6 @@ mod tests { assert_ok!(submit_with_witness(Origin::signed(99), solution)); } - // weaker. let solution = RawSolution { score: [4, 0, 0], ..Default::default() }; @@ -810,33 +817,36 @@ mod tests { #[test] fn cannot_consume_too_much_future_weight() { - ExtBuilder::default().signed_weight(40).mock_weight_info(true).build_and_execute(|| { - roll_to(15); - assert!(MultiPhase::current_phase().is_signed()); - - let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); - let solution_weight = ::WeightInfo::feasibility_check( - witness.voters, - witness.targets, - solution.compact.voter_count() as u32, - solution.compact.unique_targets().len() as u32, - ); - // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); - assert_eq!(solution.compact.voter_count(), 5); - assert_eq!(::SignedMaxWeight::get(), 40); - - assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); - - ::set(30); - - // note: resubmitting the same solution is technically okay as long as the queue has - // space. - assert_noop!( - submit_with_witness(Origin::signed(99), solution), - Error::::SignedTooMuchWeight, - ); - }) + ExtBuilder::default() + .signed_weight(40) + .mock_weight_info(true) + .build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_signed()); + + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::feasibility_check( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(solution.compact.voter_count(), 5); + assert_eq!(::SignedMaxWeight::get(), 40); + + assert_ok!(submit_with_witness(Origin::signed(99), solution.clone())); + + ::set(30); + + // note: resubmitting the same solution is technically okay as long as the queue has + // space. 
+ assert_noop!( + submit_with_witness(Origin::signed(99), solution), + Error::::SignedTooMuchWeight, + ); + }) } #[test] diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index aaeb5e4c0c9e4703f8431a7e67a9c0ecbc07d2e7..93e3878a71526a7c8e274d4797b0b40ed0c02166 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -21,19 +21,18 @@ use crate::{ helpers, Call, CompactAccuracyOf, CompactOf, Config, ElectionCompute, Error, FeasibilityError, Pallet, RawSolution, ReadySolution, RoundSnapshot, SolutionOrSnapshotSize, Weight, WeightInfo, }; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::{dispatch::DispatchResult, ensure, traits::Get}; use frame_system::offchain::SubmitTransaction; use sp_arithmetic::Perbill; use sp_npos_elections::{ - CompactSolution, ElectionResult, assignment_ratio_to_staked_normalized, - assignment_staked_to_ratio_normalized, is_score_better, seq_phragmen, + assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, is_score_better, + seq_phragmen, CompactSolution, ElectionResult, }; use sp_runtime::{ - DispatchError, - SaturatedConversion, offchain::storage::{MutateStorageError, StorageValueRef}, traits::TrailingZeroInput, + DispatchError, SaturatedConversion, }; use sp_std::{cmp::Ordering, convert::TryFrom, vec::Vec}; @@ -54,10 +53,8 @@ pub type Voter = ( ); /// The relative distribution of a voter's stake among the winning targets. -pub type Assignment = sp_npos_elections::Assignment< - ::AccountId, - CompactAccuracyOf, ->; +pub type Assignment = + sp_npos_elections::Assignment<::AccountId, CompactAccuracyOf>; /// The [`IndexAssignment`][sp_npos_elections::IndexAssignment] type specialized for a particular /// runtime `T`. @@ -105,7 +102,8 @@ fn save_solution(call: &Call) -> Result<(), MinerError> { let storage = StorageValueRef::persistent(&OFFCHAIN_CACHED_CALL); match storage.mutate::<_, (), _>(|_| Ok(call.clone())) { Ok(_) => Ok(()), - Err(MutateStorageError::ConcurrentModification(_)) => Err(MinerError::FailedToStoreSolution), + Err(MutateStorageError::ConcurrentModification(_)) => + Err(MinerError::FailedToStoreSolution), Err(MutateStorageError::ValueFunctionFailed(_)) => { // this branch should be unreachable according to the definition of // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we @@ -151,44 +149,45 @@ impl Pallet { /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way, submit /// if our call's score is greater than that of the cached solution. pub fn restore_or_compute_then_maybe_submit() -> Result<(), MinerError> { - log!(debug,"miner attempting to restore or compute an unsigned solution."); + log!(debug, "miner attempting to restore or compute an unsigned solution."); let call = restore_solution::() - .and_then(|call| { - // ensure the cached call is still current before submitting - if let Call::submit_unsigned(solution, _) = &call { - // prevent errors arising from state changes in a forkful chain - Self::basic_checks(solution, "restored")?; - Ok(call) - } else { - Err(MinerError::SolutionCallInvalid) - } - }).or_else::(|error| { - log!(debug, "restoring solution failed due to {:?}", error); - match error { - MinerError::NoStoredSolution => { - log!(trace, "mining a new solution."); - // if not present or cache invalidated due to feasibility, regenerate. 
- // note that failing `Feasibility` can only mean that the solution was - // computed over a snapshot that has changed due to a fork. - let call = Self::mine_checked_call()?; - save_solution(&call)?; + .and_then(|call| { + // ensure the cached call is still current before submitting + if let Call::submit_unsigned(solution, _) = &call { + // prevent errors arising from state changes in a forkful chain + Self::basic_checks(solution, "restored")?; Ok(call) + } else { + Err(MinerError::SolutionCallInvalid) } - MinerError::Feasibility(_) => { - log!(trace, "wiping infeasible solution."); - // kill the infeasible solution, hopefully in the next runs (whenever they - // may be) we mine a new one. - kill_ocw_solution::(); - clear_offchain_repeat_frequency(); - Err(error) - }, - _ => { - // nothing to do. Return the error as-is. - Err(error) + }) + .or_else::(|error| { + log!(debug, "restoring solution failed due to {:?}", error); + match error { + MinerError::NoStoredSolution => { + log!(trace, "mining a new solution."); + // if not present or cache invalidated due to feasibility, regenerate. + // note that failing `Feasibility` can only mean that the solution was + // computed over a snapshot that has changed due to a fork. + let call = Self::mine_checked_call()?; + save_solution(&call)?; + Ok(call) + }, + MinerError::Feasibility(_) => { + log!(trace, "wiping infeasible solution."); + // kill the infeasible solution, hopefully in the next runs (whenever they + // may be) we mine a new one. + kill_ocw_solution::(); + clear_offchain_repeat_frequency(); + Err(error) + }, + _ => { + // nothing to do. Return the error as-is. + Err(error) + }, } - } - })?; + })?; Self::submit_call(call) } @@ -240,10 +239,12 @@ impl Pallet { MinerError::PreDispatchChecksFailed(err) })?; - Self::feasibility_check(raw_solution.clone(), ElectionCompute::Unsigned).map_err(|err| { - log!(debug, "feasibility check failed for {} solution: {:?}", solution_type, err); - err - })?; + Self::feasibility_check(raw_solution.clone(), ElectionCompute::Unsigned).map_err( + |err| { + log!(debug, "feasibility check failed for {} solution: {:?}", solution_type, err); + err + }, + )?; Ok(()) } @@ -347,11 +348,7 @@ impl Pallet { // converting to `Compact`. let mut index_assignments = sorted_assignments .into_iter() - .map(|assignment| IndexAssignmentOf::::new( - &assignment, - &voter_index, - &target_index, - )) + .map(|assignment| IndexAssignmentOf::::new(&assignment, &voter_index, &target_index)) .collect::, _>>()?; // trim assignments list for weight and length. 
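`save_solution`/`restore_solution` in the hunks above cache the mined call in the node-local offchain database, so a later offchain worker run can resubmit without re-mining. A generic sketch of the same caching pattern, assuming the `StorageValueRef` API used in this diff (the key and error strings are illustrative, and this only works inside an offchain worker context):

use codec::{Decode, Encode};
use sp_runtime::offchain::storage::{MutateStorageError, StorageValueRef};

const CACHE_KEY: &[u8] = b"example/offchain-cache"; // illustrative key

/// Cache `value` in the offchain DB. `mutate` is used rather than a plain
/// `set` so that a concurrent writer surfaces as an error instead of silently
/// racing; the closure itself is infallible, mirroring `save_solution`.
fn cache_value<T: Encode + Decode + Clone>(value: &T) -> Result<(), &'static str> {
    let storage = StorageValueRef::persistent(CACHE_KEY);
    match storage.mutate::<_, (), _>(|_| Ok(value.clone())) {
        Ok(_) => Ok(()),
        Err(MutateStorageError::ConcurrentModification(_)) => Err("concurrently modified"),
        Err(MutateStorageError::ValueFunctionFailed(_)) => Err("unreachable: closure is infallible"),
    }
}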
@@ -390,10 +387,10 @@ impl Pallet { max @ _ => { let seed = sp_io::offchain::random_seed(); let random = ::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed") - % max.saturating_add(1); + .expect("input is padded with zeroes; qed") % + max.saturating_add(1); random as usize - } + }, } } @@ -418,18 +415,16 @@ impl Pallet { max_weight: Weight, assignments: &mut Vec>, ) { - let maximum_allowed_voters = Self::maximum_voter_for_weight::( - desired_targets, - size, - max_weight, - ); - let removing: usize = assignments.len().saturating_sub( - maximum_allowed_voters.saturated_into(), - ); + let maximum_allowed_voters = + Self::maximum_voter_for_weight::(desired_targets, size, max_weight); + let removing: usize = + assignments.len().saturating_sub(maximum_allowed_voters.saturated_into()); log!( debug, "from {} assignments, truncating to {} for weight, removing {}", - assignments.len(), maximum_allowed_voters, removing, + assignments.len(), + maximum_allowed_voters, + removing, ); assignments.truncate(maximum_allowed_voters as usize); } @@ -461,7 +456,7 @@ impl Pallet { // not much we can do if assignments are already empty. if high == low { - return Ok(()); + return Ok(()) } while high - low > 1 { @@ -472,22 +467,21 @@ impl Pallet { high = test; } } - let maximum_allowed_voters = - if low < assignments.len() && - encoded_size_of(&assignments[..low + 1])? <= max_allowed_length - { - low + 1 - } else { - low - }; + let maximum_allowed_voters = if low < assignments.len() && + encoded_size_of(&assignments[..low + 1])? <= max_allowed_length + { + low + 1 + } else { + low + }; // ensure our post-conditions are correct debug_assert!( encoded_size_of(&assignments[..maximum_allowed_voters]).unwrap() <= max_allowed_length ); debug_assert!(if maximum_allowed_voters < assignments.len() { - encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() - > max_allowed_length + encoded_size_of(&assignments[..maximum_allowed_voters + 1]).unwrap() > + max_allowed_length } else { true }); @@ -517,7 +511,7 @@ impl Pallet { max_weight: Weight, ) -> u32 { if size.voters < 1 { - return size.voters; + return size.voters } let max_voters = size.voters.max(1); @@ -536,7 +530,7 @@ impl Pallet { Some(voters) if voters < max_voters => Ok(voters), _ => Err(()), } - } + }, Ordering::Greater => voters.checked_sub(step).ok_or(()), Ordering::Equal => Ok(voters), } @@ -551,11 +545,9 @@ impl Pallet { // proceed with the binary search Ok(next) if next != voters => { voters = next; - } + }, // we are out of bounds, break out of the loop. - Err(()) => { - break; - } + Err(()) => break, // we found the right value - early exit the function. Ok(next) => return next, } @@ -599,17 +591,16 @@ impl Pallet { |maybe_head: Result, _>| { match maybe_head { Ok(Some(head)) if now < head => Err("fork."), - Ok(Some(head)) if now >= head && now <= head + threshold => { - Err("recently executed.") - } + Ok(Some(head)) if now >= head && now <= head + threshold => + Err("recently executed."), Ok(Some(head)) if now > head + threshold => { // we can run again now. Write the new head. Ok(now) - } + }, _ => { // value doesn't exist. Probably this node just booted up. Write, and run Ok(now) - } + }, } }, ); @@ -632,9 +623,7 @@ impl Pallet { /// /// NOTE: Ideally, these tests should move more and more outside of this and more to the miner's /// code, so that we do less and less storage reads here.
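`trim_assignments_length` above binary-searches for the largest prefix of the (stake-sorted) assignments whose encoded size still fits the limit. The same search in isolation, with an injected size function standing in for SCALE encoding (all names illustrative; the size function is assumed monotone in prefix length):

/// Find the largest `n` such that `encoded_size_of(&items[..n]) <= max_len`,
/// mirroring the binary search in `trim_assignments_length`.
fn max_prefix_len<T>(
    items: &[T],
    max_len: usize,
    encoded_size_of: impl Fn(&[T]) -> usize,
) -> usize {
    let (mut low, mut high) = (0, items.len());
    while high - low > 1 {
        let test = (high + low) / 2;
        if encoded_size_of(&items[..test]) <= max_len {
            low = test;
        } else {
            high = test;
        }
    }
    // `low` fits; check whether one more element still fits.
    if low < items.len() && encoded_size_of(&items[..low + 1]) <= max_len {
        low + 1
    } else {
        low
    }
}

#[test]
fn finds_largest_fitting_prefix() {
    // Pretend each item encodes to 4 bytes.
    let items = [0u8; 10];
    assert_eq!(max_prefix_len(&items, 14, |s| s.len() * 4), 3);
    assert_eq!(max_prefix_len(&items, 40, |s| s.len() * 4), 10);
}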
- pub fn unsigned_pre_dispatch_checks( - solution: &RawSolution>, - ) -> DispatchResult { + pub fn unsigned_pre_dispatch_checks(solution: &RawSolution>) -> DispatchResult { // ensure solution is timely. Don't panic yet. This is a cheap check. ensure!(Self::current_phase().is_unsigned_open(), Error::::PreDispatchEarlySubmission); @@ -643,8 +632,8 @@ impl Pallet { // ensure correct number of winners. ensure!( - Self::desired_targets().unwrap_or_default() - == solution.compact.unique_targets().len() as u32, + Self::desired_targets().unwrap_or_default() == + solution.compact.unique_targets().len() as u32, Error::::PreDispatchWrongWinnerCount, ); @@ -761,19 +750,22 @@ mod max_weight { mod tests { use super::*; use crate::{ - CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, - TransactionValidityError, mock::{ - Call as OuterCall, ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, Origin, Runtime, - TestCompact, TrimHelpers, roll_to, roll_to_with_ocw, trim_helpers, witness, - UnsignedPhase, BlockNumber, System, + roll_to, roll_to_with_ocw, trim_helpers, witness, BlockNumber, Call as OuterCall, + ExtBuilder, Extrinsic, MinerMaxWeight, MultiPhase, Origin, Runtime, System, + TestCompact, TrimHelpers, UnsignedPhase, }, + CurrentPhase, InvalidTransaction, Phase, QueuedSolution, TransactionSource, + TransactionValidityError, }; use frame_benchmarking::Zero; use frame_support::{assert_noop, assert_ok, dispatch::Dispatchable, traits::OffchainWorker}; use sp_npos_elections::IndexAssignment; - use sp_runtime::offchain::storage_lock::{StorageLock, BlockAndTime}; - use sp_runtime::{traits::ValidateUnsigned, PerU16}; + use sp_runtime::{ + offchain::storage_lock::{BlockAndTime, StorageLock}, + traits::ValidateUnsigned, + PerU16, + }; type Assignment = crate::unsigned::Assignment; @@ -786,8 +778,11 @@ mod tests { // initial assert_eq!(MultiPhase::current_phase(), Phase::Off); assert!(matches!( - ::validate_unsigned(TransactionSource::Local, &call) - .unwrap_err(), + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) )); assert!(matches!( @@ -799,8 +794,11 @@ mod tests { roll_to(15); assert_eq!(MultiPhase::current_phase(), Phase::Signed); assert!(matches!( - ::validate_unsigned(TransactionSource::Local, &call) - .unwrap_err(), + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) )); assert!(matches!( @@ -823,8 +821,11 @@ mod tests { >::put(Phase::Unsigned((false, 25))); assert!(MultiPhase::current_phase().is_unsigned()); assert!(matches!( - ::validate_unsigned(TransactionSource::Local, &call) - .unwrap_err(), + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) )); assert!(matches!( @@ -895,23 +896,27 @@ mod tests { #[test] fn priority_is_set() { - ExtBuilder::default().miner_tx_priority(20).desired_targets(0).build_and_execute(|| { - roll_to(25); - assert!(MultiPhase::current_phase().is_unsigned()); - - let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; - let call = Call::submit_unsigned(solution.clone(), witness()); + ExtBuilder::default() + .miner_tx_priority(20) + .desired_targets(0) + .build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &call - ) - .unwrap() - .priority, - 25 - ); - 
}) + let solution = + RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap() + .priority, + 25 + ); + }) } #[test] @@ -974,35 +979,38 @@ mod tests { #[test] fn miner_trims_weight() { - ExtBuilder::default().miner_weight(100).mock_weight_info(true).build_and_execute(|| { - roll_to(25); - assert!(MultiPhase::current_phase().is_unsigned()); - - let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); - let solution_weight = ::WeightInfo::submit_unsigned( - witness.voters, - witness.targets, - solution.compact.voter_count() as u32, - solution.compact.unique_targets().len() as u32, - ); - // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 35); - assert_eq!(solution.compact.voter_count(), 5); - - // now reduce the max weight - ::set(25); + ExtBuilder::default() + .miner_weight(100) + .mock_weight_info(true) + .build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); - let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); - let solution_weight = ::WeightInfo::submit_unsigned( - witness.voters, - witness.targets, - solution.compact.voter_count() as u32, - solution.compact.unique_targets().len() as u32, - ); - // default solution will have 5 edges (5 * 5 + 10) - assert_eq!(solution_weight, 25); - assert_eq!(solution.compact.voter_count(), 3); - }) + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(solution.compact.voter_count(), 5); + + // now reduce the max weight + ::set(25); + + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 25); + assert_eq!(solution.compact.voter_count(), 3); + }) } #[test] @@ -1014,7 +1022,7 @@ mod tests { assert_eq!( MultiPhase::mine_check_save_submit().unwrap_err(), - MinerError::PreDispatchChecksFailed(DispatchError::Module{ + MinerError::PreDispatchChecksFailed(DispatchError::Module { index: 2, error: 1, message: Some("PreDispatchWrongWinnerCount"), @@ -1360,15 +1368,14 @@ mod tests { }; // Custom(7) maps to PreDispatchChecksFailed - let pre_dispatch_check_error = TransactionValidityError::Invalid( - InvalidTransaction::Custom(7), - ); + let pre_dispatch_check_error = + TransactionValidityError::Invalid(InvalidTransaction::Custom(7)); assert_eq!( ::validate_unsigned( TransactionSource::Local, &call, ) - .unwrap_err(), + .unwrap_err(), pre_dispatch_check_error, ); assert_eq!( @@ -1384,21 +1391,14 @@ mod tests { roll_to(25); // given - let TrimHelpers { - mut assignments, - encoded_size_of, - .. - } = trim_helpers(); + let TrimHelpers { mut assignments, encoded_size_of, .. 
} = trim_helpers(); let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); let encoded_len = compact.encoded_size() as u32; let compact_clone = compact.clone(); // when - MultiPhase::trim_assignments_length( - encoded_len, - &mut assignments, - encoded_size_of, - ).unwrap(); + MultiPhase::trim_assignments_length(encoded_len, &mut assignments, encoded_size_of) + .unwrap(); // then let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); @@ -1412,11 +1412,7 @@ mod tests { roll_to(25); // given - let TrimHelpers { - mut assignments, - encoded_size_of, - .. - } = trim_helpers(); + let TrimHelpers { mut assignments, encoded_size_of, .. } = trim_helpers(); let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); let encoded_len = compact.encoded_size(); let compact_clone = compact.clone(); @@ -1426,7 +1422,8 @@ mod tests { encoded_len as u32 - 1, &mut assignments, encoded_size_of, - ).unwrap(); + ) + .unwrap(); // then let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); @@ -1441,33 +1438,26 @@ mod tests { roll_to(25); // given - let TrimHelpers { - voters, - mut assignments, - encoded_size_of, - voter_index, - } = trim_helpers(); + let TrimHelpers { voters, mut assignments, encoded_size_of, voter_index } = + trim_helpers(); let compact = CompactOf::::try_from(assignments.as_slice()).unwrap(); let encoded_len = compact.encoded_size() as u32; let count = assignments.len(); - let min_stake_voter = voters.iter() + let min_stake_voter = voters + .iter() .map(|(id, weight, _)| (weight, id)) .min() .and_then(|(_, id)| voter_index(id)) .unwrap(); // when - MultiPhase::trim_assignments_length( - encoded_len - 1, - &mut assignments, - encoded_size_of, - ).unwrap(); + MultiPhase::trim_assignments_length(encoded_len - 1, &mut assignments, encoded_size_of) + .unwrap(); // then assert_eq!(assignments.len(), count - 1, "we must have removed exactly one assignment"); assert!( - assignments.iter() - .all(|IndexAssignment{ who, ..}| *who != min_stake_voter), + assignments.iter().all(|IndexAssignment { who, .. }| *who != min_stake_voter), "min_stake_voter must no longer be in the set of voters", ); }); diff --git a/substrate/frame/election-provider-multi-phase/src/weights.rs b/substrate/frame/election-provider-multi-phase/src/weights.rs index 0f732784c62c4b304e7e8d02bdf0dcb8cb95f1a3..99fad2f068180c2ca9d6ab2fbbb344f609a44f22 100644 --- a/substrate/frame/election-provider-multi-phase/src/weights.rs +++ b/substrate/frame/election-provider-multi-phase/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 1d1ebf02a263585bffc140c6b15750bab4935db4..72896e5599138a0badec278a9331b6ea5ff50186 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -164,13 +164,13 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod onchain; -use sp_std::{prelude::*, fmt::Debug}; use frame_support::weights::Weight; +use sp_std::{fmt::Debug, prelude::*}; /// Re-export some type as they are used in the interface. pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, ExtendedBalance, PerThing128, Supports, Support, VoteWeight + Assignment, ExtendedBalance, PerThing128, Support, Supports, VoteWeight, }; /// Types that are used by the data provider trait. 
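The `OnChainSequentialPhragmen` hunks a little further below run a fixed pipeline: `seq_phragmen`, then `assignment_ratio_to_staked_normalized`, then `to_supports`. A self-contained sketch of that pipeline with three hard-coded voters (accounts and stakes invented; the function names are the real `sp_npos_elections` ones used in this diff):

use sp_npos_elections::{
    assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, to_without_backing,
    ElectionResult, VoteWeight,
};
use sp_runtime::Perbill;

/// Elect 2 of 3 targets, then convert the ratio assignments back into staked
/// `Supports`, exactly as the on-chain provider does.
fn elect_two() -> Result<(), sp_npos_elections::Error> {
    let targets: Vec<u64> = vec![10, 20, 30];
    let voters: Vec<(u64, VoteWeight, Vec<u64>)> =
        vec![(1, 10, vec![10, 20]), (2, 20, vec![30, 20]), (3, 15, vec![10, 30])];

    let stake_of = |who: &u64| -> VoteWeight {
        voters.iter().find(|(v, _, _)| v == who).map(|(_, s, _)| *s).unwrap_or_default()
    };

    let ElectionResult { winners, assignments } =
        seq_phragmen::<_, Perbill>(2, targets, voters.clone(), None)?;
    let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?;
    let winners = to_without_backing(winners);
    let supports = to_supports(&winners, &staked)?;
    println!("{:?}", supports);
    Ok(())
}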
@@ -224,7 +224,8 @@ pub trait ElectionDataProvider { _voters: Vec<(AccountId, VoteWeight, Vec)>, _targets: Vec, _target_stake: Option, - ) {} + ) { + } /// Utility function only to be used in benchmarking scenarios, to be implemented optionally, /// else a noop. diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index e034a9c36a8ac588c165fafb92c2c4f610c8e1f4..2e2c286dc6422bf02a1486c1f7dc01fc16fe5120 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -18,9 +18,9 @@ //! An implementation of [`ElectionProvider`] that does an on-chain sequential phragmen. use crate::{ElectionDataProvider, ElectionProvider}; +use frame_support::{traits::Get, weights::Weight}; use sp_npos_elections::*; use sp_std::{collections::btree_map::BTreeMap, marker::PhantomData, prelude::*}; -use frame_support::{traits::Get, weights::Weight}; /// Errors of the on-chain election. #[derive(Eq, PartialEq, Debug)] @@ -83,9 +83,8 @@ impl ElectionProvider for OnChainSequen stake_map.insert(v.clone(), *s); }); - let stake_of = |w: &T::AccountId| -> VoteWeight { - stake_map.get(w).cloned().unwrap_or_default() - }; + let stake_of = + |w: &T::AccountId| -> VoteWeight { stake_map.get(w).cloned().unwrap_or_default() }; let ElectionResult { winners, assignments } = seq_phragmen::<_, T::Accuracy>(desired_targets as usize, targets, voters, None) @@ -94,16 +93,18 @@ impl ElectionProvider for OnChainSequen let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; let winners = to_without_backing(winners); - to_supports(&winners, &staked).map_err(Error::from).map(|s| (s, T::BlockWeights::get().max_block)) + to_supports(&winners, &staked) + .map_err(Error::from) + .map(|s| (s, T::BlockWeights::get().max_block)) } } #[cfg(test)] mod tests { use super::*; + use frame_support::weights::Weight; use sp_npos_elections::Support; use sp_runtime::Perbill; - use frame_support::weights::Weight; type AccountId = u64; type BlockNumber = u32; @@ -151,20 +152,8 @@ mod tests { assert_eq!( OnChainPhragmen::elect().unwrap().0, vec![ - ( - 10, - Support { - total: 25, - voters: vec![(1, 10), (3, 15)] - } - ), - ( - 30, - Support { - total: 35, - voters: vec![(2, 20), (3, 15)] - } - ) + (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), + (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) ] ); } diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 86a01169780677488d1e64c9bf1acbdcf91dbebb..4e19b64ef7a5f650fd7febcbb3ff3a4a0f1519a6 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -21,9 +21,9 @@ use super::*; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelist}; +use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelist, impl_benchmark_test_suite}; -use frame_support::{traits::OnInitialize, dispatch::DispatchResultWithPostInfo}; use crate::Pallet as Elections; @@ -62,28 +62,34 @@ fn candidate_count() -> u32 { } /// Add `c` new candidates. 
-fn submit_candidates(c: u32, prefix: &'static str) - -> Result, &'static str> -{ - (0..c).map(|i| { - let account = endowed_account::(prefix, i); - >::submit_candidacy( - RawOrigin::Signed(account.clone()).into(), - candidate_count::(), - ).map_err(|_| "failed to submit candidacy")?; - Ok(account) - }).collect::>() +fn submit_candidates( + c: u32, + prefix: &'static str, +) -> Result, &'static str> { + (0..c) + .map(|i| { + let account = endowed_account::(prefix, i); + >::submit_candidacy( + RawOrigin::Signed(account.clone()).into(), + candidate_count::(), + ) + .map_err(|_| "failed to submit candidacy")?; + Ok(account) + }) + .collect::>() } /// Add `c` new candidates with self vote. -fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) - -> Result, &'static str> -{ +fn submit_candidates_with_self_vote( + c: u32, + prefix: &'static str, +) -> Result, &'static str> { let candidates = submit_candidates::(c, prefix)?; let stake = default_stake::(BALANCE_FACTOR); - let _ = candidates.iter().map(|c| - submit_voter::(c.clone(), vec![c.clone()], stake).map(|_| ()) - ).collect::>()?; + let _ = candidates + .iter() + .map(|c| submit_voter::(c.clone(), vec![c.clone()], stake).map(|_| ())) + .collect::>()?; Ok(candidates) } @@ -98,18 +104,16 @@ fn submit_voter( /// create `num_voter` voters who randomly vote for at most `votes` of `all_candidates` if /// available. -fn distribute_voters(mut all_candidates: Vec, num_voters: u32, votes: usize) - -> Result<(), &'static str> -{ +fn distribute_voters( + mut all_candidates: Vec, + num_voters: u32, + votes: usize, +) -> Result<(), &'static str> { let stake = default_stake::(BALANCE_FACTOR); for i in 0..num_voters { // to ensure that votes are different all_candidates.rotate_left(1); - let votes = all_candidates - .iter() - .cloned() - .take(votes) - .collect::>(); + let votes = all_candidates.iter().cloned().take(votes).collect::>(); let voter = endowed_account::("voter", i); submit_voter::(voter, votes, stake)?; } @@ -128,13 +132,11 @@ fn fill_seats_up_to(m: u32) -> Result, &'static str m as usize, "wrong number of members and runners-up", ); - Ok( - >::members() - .into_iter() - .map(|m| m.who) - .chain(>::runners_up().into_iter().map(|r| r.who)) - .collect() - ) + Ok(>::members() + .into_iter() + .map(|m| m.who) + .chain(>::runners_up().into_iter().map(|r| r.who)) + .collect()) } /// removes all the storage items to reverse any genesis state. 
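`distribute_voters` above keeps the benchmark's vote lists distinct by rotating the candidate list one step per voter before taking a fixed-size prefix. The trick in isolation (plain `u64` accounts, purely illustrative):

/// Hand out a distinct vote list per voter: rotate the candidates once per
/// voter, then take the first `votes` entries.
fn vote_lists(mut candidates: Vec<u64>, num_voters: usize, votes: usize) -> Vec<Vec<u64>> {
    (0..num_voters)
        .map(|_| {
            candidates.rotate_left(1);
            candidates.iter().copied().take(votes).collect()
        })
        .collect()
}

#[test]
fn neighbouring_voters_differ() {
    let lists = vote_lists(vec![1, 2, 3, 4], 2, 2);
    assert_eq!(lists, vec![vec![2, 3], vec![3, 4]]);
}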
diff --git a/substrate/frame/elections-phragmen/src/lib.rs b/substrate/frame/elections-phragmen/src/lib.rs index a3232ac0d28ff9a1a344a6c63b9b02d8f0da3503..97147692fd6def3ac9b64e0205215599455b2c00 100644 --- a/substrate/frame/elections-phragmen/src/lib.rs +++ b/substrate/frame/elections-phragmen/src/lib.rs @@ -100,11 +100,11 @@ use codec::{Decode, Encode}; use frame_support::{ - dispatch::{WithPostDispatchInfo}, + dispatch::WithPostDispatchInfo, traits::{ ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, - WithdrawReasons, SortedMembers, + SortedMembers, WithdrawReasons, }, weights::Weight, }; @@ -113,7 +113,7 @@ use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, DispatchError, Perbill, RuntimeDebug, }; -use sp_std::{prelude::*, cmp::Ordering}; +use sp_std::{cmp::Ordering, prelude::*}; mod benchmarking; pub mod weights; @@ -127,8 +127,9 @@ pub const MAXIMUM_VOTE: usize = 16; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// An indication that the renouncing account currently has which of the below roles. #[derive(Encode, Decode, Clone, PartialEq, RuntimeDebug)] @@ -171,14 +172,13 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> - + IsType<::Event>; + type Event: From> + IsType<::Event>; /// Identifier for the elections-phragmen pallet's lock #[pallet::constant] @@ -320,24 +320,19 @@ pub mod pallet { let to_reserve = new_deposit - old_deposit; T::Currency::reserve(&who, to_reserve) .map_err(|_| Error::::UnableToPayBond)?; - } - Ordering::Equal => {} + }, + Ordering::Equal => {}, Ordering::Less => { // Must unreserve a bit. let to_unreserve = old_deposit - new_deposit; let _remainder = T::Currency::unreserve(&who, to_unreserve); debug_assert!(_remainder.is_zero()); - } + }, }; // Amount to be locked up. let locked_stake = value.min(T::Currency::total_balance(&who)); - T::Currency::set_lock( - T::PalletId::get(), - &who, - locked_stake, - WithdrawReasons::all(), - ); + T::Currency::set_lock(T::PalletId::get(), &who, locked_stake, WithdrawReasons::all()); Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); Ok(None.into()) @@ -426,7 +421,7 @@ pub mod pallet { let _ = Self::remove_and_replace_member(&who, false) .map_err(|_| Error::::InvalidRenouncing)?; Self::deposit_event(Event::Renounced(who)); - } + }, Renouncing::RunnerUp => { >::try_mutate::<_, Error, _>(|runners_up| { let index = runners_up @@ -440,7 +435,7 @@ pub mod pallet { Self::deposit_event(Event::Renounced(who)); Ok(()) })?; - } + }, Renouncing::Candidate(count) => { >::try_mutate::<_, Error, _>(|candidates| { ensure!(count >= candidates.len() as u32, Error::::InvalidWitnessData); @@ -453,7 +448,7 @@ pub mod pallet { Self::deposit_event(Event::Renounced(who)); Ok(()) })?; - } + }, }; Ok(None.into()) } @@ -491,7 +486,7 @@ pub mod pallet { return Err(Error::::InvalidReplacement.with_weight( // refund. The weight value comes from a benchmark which is special to this. 
T::WeightInfo::remove_member_wrong_refund(), - )); + )) } let had_replacement = Self::remove_and_replace_member(&who, true)?; @@ -664,37 +659,46 @@ pub mod pallet { self.members.len() as u32 <= T::DesiredMembers::get(), "Cannot accept more than DesiredMembers genesis member", ); - let members = self.members.iter().map(|(ref member, ref stake)| { - // make sure they have enough stake. - assert!( - T::Currency::free_balance(member) >= *stake, - "Genesis member does not have enough stake.", - ); + let members = self + .members + .iter() + .map(|(ref member, ref stake)| { + // make sure they have enough stake. + assert!( + T::Currency::free_balance(member) >= *stake, + "Genesis member does not have enough stake.", + ); - // Note: all members will only vote for themselves, hence they must be given exactly - // their own stake as total backing. Any sane election should behave as such. - // Nonetheless, stakes will be updated for term 1 onwards according to the election. - Members::::mutate(|members| { - match members.binary_search_by(|m| m.who.cmp(member)) { - Ok(_) => panic!("Duplicate member in elections-phragmen genesis: {}", member), - Err(pos) => members.insert( - pos, - SeatHolder { who: member.clone(), stake: *stake, deposit: Zero::zero() }, - ), - } - }); - - // set self-votes to make persistent. Genesis voters don't have any bond, nor do - // they have any lock. NOTE: this means that we will still try to remove a lock once - // this genesis voter is removed, and for now it is okay because remove_lock is noop - // if lock is not there. - >::insert( - &member, - Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, - ); + // Note: all members will only vote for themselves, hence they must be given exactly + // their own stake as total backing. Any sane election should behave as such. + // Nonetheless, stakes will be updated for term 1 onwards according to the election. + Members::::mutate(|members| { + match members.binary_search_by(|m| m.who.cmp(member)) { + Ok(_) => + panic!("Duplicate member in elections-phragmen genesis: {}", member), + Err(pos) => members.insert( + pos, + SeatHolder { + who: member.clone(), + stake: *stake, + deposit: Zero::zero(), + }, + ), + } + }); + + // set self-votes to make persistent. Genesis voters don't have any bond, nor do + // they have any lock. NOTE: this means that we will still try to remove a lock once + // this genesis voter is removed, and for now it is okay because remove_lock is noop + // if lock is not there. + >::insert( + &member, + Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, + ); - member.clone() - }).collect::>(); + member.clone() + }) + .collect::>(); // report genesis members to upstream, if any. T::InitializeMembers::initialize_members(&members); @@ -731,8 +735,9 @@ impl Pallet { // - `Ok(None)` if member was removed but no replacement was found // - `Err(_)` if who is not a member. let maybe_replacement = >::try_mutate::<_, Error, _>(|members| { - let remove_index = - members.binary_search_by(|m| m.who.cmp(who)).map_err(|_| Error::::NotMember)?; + let remove_index = members + .binary_search_by(|m| m.who.cmp(who)) + .map_err(|_| Error::::NotMember)?; // we remove the member anyhow, regardless of having a runner-up or not. 
let removed = members.remove(remove_index); @@ -764,10 +769,8 @@ impl Pallet { Ok(maybe_next_best) })?; - let remaining_member_ids_sorted = Self::members() - .into_iter() - .map(|x| x.who.clone()) - .collect::>(); + let remaining_member_ids_sorted = + Self::members().into_iter().map(|x| x.who.clone()).collect::>(); let outgoing = &[who.clone()]; let maybe_current_prime = T::ChangeMembers::get_prime(); let return_value = match maybe_replacement { @@ -776,18 +779,18 @@ impl Pallet { T::ChangeMembers::change_members_sorted( &[incoming.who], outgoing, - &remaining_member_ids_sorted[..] + &remaining_member_ids_sorted[..], ); true - } + }, None => { T::ChangeMembers::change_members_sorted( &[], outgoing, - &remaining_member_ids_sorted[..] + &remaining_member_ids_sorted[..], ); false - } + }, }; // if there was a prime before and they are not the one being removed, then set them @@ -845,11 +848,9 @@ impl Pallet { /// O(NLogM) with M candidates and `who` having voted for `N` of them. /// Reads Members, RunnersUp, Candidates and Voting(who) from database. fn is_defunct_voter(votes: &[T::AccountId]) -> bool { - votes.iter().all(|v| - !Self::is_member(v) && - !Self::is_runner_up(v) && - !Self::is_candidate(v).is_ok() - ) + votes.iter().all(|v| { + !Self::is_member(v) && !Self::is_runner_up(v) && !Self::is_candidate(v).is_ok() + }) } /// Remove a certain someone as a voter. @@ -880,15 +881,12 @@ impl Pallet { if candidates_and_deposit.len().is_zero() { Self::deposit_event(Event::EmptyTerm); - return T::DbWeight::get().reads(5); + return T::DbWeight::get().reads(5) } // All of the new winners that come out of phragmen will thus have a deposit recorded. - let candidate_ids = candidates_and_deposit - .iter() - .map(|(x, _)| x) - .cloned() - .collect::>(); + let candidate_ids = + candidates_and_deposit.iter().map(|(x, _)| x).cloned().collect::>(); // helper closures to deal with balance/stake. let total_issuance = T::Currency::total_issuance(); @@ -898,10 +896,11 @@ impl Pallet { let mut num_edges: u32 = 0; // used for prime election. let voters_and_stakes = Voting::::iter() - .map(|(voter, Voter { stake, votes, .. })| { (voter, stake, votes) }) + .map(|(voter, Voter { stake, votes, .. })| (voter, stake, votes)) .collect::>(); // used for phragmen. - let voters_and_votes = voters_and_stakes.iter() + let voters_and_votes = voters_and_stakes + .iter() .cloned() .map(|(voter, stake, votes)| { num_edges = num_edges.saturating_add(votes.len() as u32); @@ -917,15 +916,14 @@ impl Pallet { candidate_ids, voters_and_votes.clone(), None, - ).map(|ElectionResult { winners, assignments: _, }| { + ) + .map(|ElectionResult { winners, assignments: _ }| { // this is already sorted by id. - let old_members_ids_sorted = >::take().into_iter() - .map(|m| m.who) - .collect::>(); + let old_members_ids_sorted = + >::take().into_iter().map(|m| m.who).collect::>(); // this one needs a sort by id. - let mut old_runners_up_ids_sorted = >::take().into_iter() - .map(|r| r.who) - .collect::>(); + let mut old_runners_up_ids_sorted = + >::take().into_iter().map(|r| r.who).collect::>(); old_runners_up_ids_sorted.sort(); // filter out those who end up with no backing stake. @@ -941,16 +939,15 @@ impl Pallet { // split new set into winners and runners up. 
let split_point = desired_seats.min(new_set_with_stake.len()); - let mut new_members_sorted_by_id = new_set_with_stake.drain(..split_point).collect::>(); + let mut new_members_sorted_by_id = + new_set_with_stake.drain(..split_point).collect::>(); new_members_sorted_by_id.sort_by(|i, j| i.0.cmp(&j.0)); // all the rest will be runners-up new_set_with_stake.reverse(); let new_runners_up_sorted_by_rank = new_set_with_stake; - let mut new_runners_up_ids_sorted = new_runners_up_sorted_by_rank - .iter() - .map(|(r, _)| r.clone()) - .collect::>(); + let mut new_runners_up_ids_sorted = + new_runners_up_sorted_by_rank.iter().map(|(r, _)| r.clone()).collect::>(); new_runners_up_ids_sorted.sort(); // Now we select a prime member using a [Borda @@ -963,14 +960,15 @@ impl Pallet { .map(|c| (&c.0, BalanceOf::::zero())) .collect::>(); for (_, stake, votes) in voters_and_stakes.into_iter() { - for (vote_multiplier, who) in votes.iter() + for (vote_multiplier, who) in votes + .iter() .enumerate() .map(|(vote_position, who)| ((MAXIMUM_VOTE - vote_position) as u32, who)) { if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { - prime_votes[i].1 = prime_votes[i].1.saturating_add( - stake.saturating_mul(vote_multiplier.into()) - ); + prime_votes[i].1 = prime_votes[i] + .1 + .saturating_add(stake.saturating_mul(vote_multiplier.into())); } } } @@ -990,18 +988,13 @@ impl Pallet { &new_members_ids_sorted, &old_members_ids_sorted, ); - T::ChangeMembers::change_members_sorted( - &incoming, - &outgoing, - &new_members_ids_sorted, - ); + T::ChangeMembers::change_members_sorted(&incoming, &outgoing, &new_members_ids_sorted); T::ChangeMembers::set_prime(prime); // All candidates/members/runners-up who are no longer retaining a position as a // seat holder will lose their bond. candidates_and_deposit.iter().for_each(|(c, d)| { - if - new_members_ids_sorted.binary_search(c).is_err() && + if new_members_ids_sorted.binary_search(c).is_err() && new_runners_up_ids_sorted.binary_search(c).is_err() { let (imbalance, _) = T::Currency::slash_reserved(c, *d); @@ -1048,7 +1041,8 @@ impl Pallet { Self::deposit_event(Event::NewTerm(new_members_sorted_by_id)); >::mutate(|v| *v += 1); - }).map_err(|e| { + }) + .map_err(|e| { log::error!( target: "runtime::elections-phragmen", "Failed to run election [{:?}].", @@ -1080,11 +1074,9 @@ impl SortedMembers for Pallet { // checks in runtime benchmarking. 
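An illustrative sketch of the Borda-style prime selection used above (outside this diff): assuming `MAXIMUM_VOTE = 16` as in this pallet, a vote at position `p` of a ballot adds `stake * (MAXIMUM_VOTE - p)` to that candidate's score, so earlier preferences weigh more; the new member with the highest score becomes the prime. Toy account ids and stakes:

use std::collections::BTreeMap;

const MAXIMUM_VOTE: usize = 16;

// Fold one voter's ballot into the running prime scores.
fn accumulate_prime_votes(scores: &mut BTreeMap<u64, u64>, stake: u64, votes: &[u64]) {
    for (position, who) in votes.iter().enumerate() {
        // Earlier positions get larger multipliers: 16, 15, 14, ...
        *scores.entry(*who).or_insert(0) += stake * (MAXIMUM_VOTE - position) as u64;
    }
}

fn main() {
    let mut scores = BTreeMap::new();
    accumulate_prime_votes(&mut scores, 30, &[7, 9]); // stake 30, ballot [7, 9]
    assert_eq!(scores[&7], 480); // 30 * 16
    assert_eq!(scores[&9], 450); // 30 * 15
}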
#[cfg(feature = "runtime-benchmarks")] fn add(who: &T::AccountId) { - Members::::mutate(|members| { - match members.binary_search_by(|m| m.who.cmp(who)) { - Ok(_) => (), - Err(pos) => members.insert(pos, SeatHolder { who: who.clone(), ..Default::default() }), - } + Members::::mutate(|members| match members.binary_search_by(|m| m.who.cmp(who)) { + Ok(_) => (), + Err(pos) => members.insert(pos, SeatHolder { who: who.clone(), ..Default::default() }), }) } } @@ -1103,19 +1095,19 @@ impl ContainsLengthBound for Pallet { #[cfg(test)] mod tests { use super::*; + use crate as elections_phragmen; use frame_support::{ - assert_ok, assert_noop, parameter_types, traits::OnInitialize, - dispatch::DispatchResultWithPostInfo, + assert_noop, assert_ok, dispatch::DispatchResultWithPostInfo, parameter_types, + traits::OnInitialize, }; - use substrate_test_utils::assert_eq_uvec; + use frame_system::ensure_signed; use sp_core::H256; use sp_runtime::{ - BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; - use frame_system::ensure_signed; - use crate as elections_phragmen; + use substrate_test_utils::assert_eq_uvec; parameter_types! { pub const BlockHashCount: u64 = 250; @@ -1264,10 +1256,7 @@ mod tests { impl Default for ExtBuilder { fn default() -> Self { - Self { - balance_factor: 1, - genesis_members: vec![], - } + Self { balance_factor: 1, genesis_members: vec![] } } } @@ -1290,10 +1279,7 @@ mod tests { } pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { MEMBERS.with(|m| { - *m.borrow_mut() = members - .iter() - .map(|(m, _)| m.clone()) - .collect::>() + *m.borrow_mut() = members.iter().map(|(m, _)| m.clone()).collect::>() }); self.genesis_members = members; self @@ -1307,22 +1293,28 @@ mod tests { self } pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); + MEMBERS.with(|m| { + *m.borrow_mut() = + self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>() + }); let mut ext: sp_io::TestExternalities = GenesisConfig { - balances: pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ], }, elections: elections_phragmen::GenesisConfig:: { - members: self.genesis_members + members: self.genesis_members, }, - }.build_storage().unwrap().into(); + } + .build_storage() + .unwrap() + .into(); ext.execute_with(pre_conditions); ext.execute_with(test); ext.execute_with(post_conditions) @@ -1330,10 +1322,7 @@ mod tests { } fn candidate_ids() -> Vec { - Elections::candidates() - .into_iter() - .map(|(c, _)| c) - .collect::>() + Elections::candidates().into_iter().map(|(c, _)| c).collect::>() } fn candidate_deposit(who: &u64) -> u64 { @@ -1360,7 +1349,10 @@ mod tests { } fn runners_up_and_stake() -> Vec<(u64, u64)> { - Elections::runners_up().into_iter().map(|r| (r.who, r.stake)).collect::>() + Elections::runners_up() + .into_iter() + .map(|r| (r.who, r.stake)) + .collect::>() } fn all_voters() -> Vec { @@ -1473,64 +1465,88 @@ mod tests { #[test] fn genesis_members_should_work() { - ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!( - Elections::members(), - vec![ - SeatHolder { who: 1, stake: 10, deposit: 0 }, - 
SeatHolder { who: 2, stake: 20, deposit: 0 } - ] - ); + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 } + ] + ); - assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); - assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); - // they will persist since they have self vote. - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + // they will persist since they have self vote. + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![1, 2]); - }) + assert_eq!(members_ids(), vec![1, 2]); + }) } #[test] fn genesis_voters_can_remove_lock() { - ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { - System::set_block_number(1); + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| { + System::set_block_number(1); - assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); - assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); - assert_ok!(Elections::remove_voter(Origin::signed(1))); - assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(1))); + assert_ok!(Elections::remove_voter(Origin::signed(2))); - assert_eq!(Elections::voting(1), Default::default()); - assert_eq!(Elections::voting(2), Default::default()); - }) + assert_eq!(Elections::voting(1), Default::default()); + assert_eq!(Elections::voting(2), Default::default()); + }) } #[test] fn genesis_members_unsorted_should_work() { - ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!( - Elections::members(), - vec![ - SeatHolder { who: 1, stake: 10, deposit: 0 }, - SeatHolder { who: 2, stake: 20, deposit: 0 }, - ] - ); + ExtBuilder::default() + .genesis_members(vec![(2, 20), (1, 10)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!( + Elections::members(), + vec![ + SeatHolder { who: 1, stake: 10, deposit: 0 }, + SeatHolder { who: 2, stake: 20, deposit: 0 }, + ] + ); - assert_eq!(Elections::voting(1), Voter { stake: 10u64, votes: vec![1], deposit: 0 }); - assert_eq!(Elections::voting(2), Voter { stake: 20u64, votes: vec![2], deposit: 0 }); + assert_eq!( + Elections::voting(1), + Voter { stake: 10u64, votes: vec![1], deposit: 0 } + ); + assert_eq!( + Elections::voting(2), + Voter { stake: 20u64, votes: vec![2], deposit: 0 } + ); - // they will persist since they have self vote. - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + // they will persist since they have self vote. 
+ System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![1, 2]); - }) + assert_eq!(members_ids(), vec![1, 2]); + }) } #[test] @@ -1562,10 +1578,7 @@ mod tests { #[test] fn term_duration_zero_is_passive() { - ExtBuilder::default() - .term_duration(0) - .build_and_execute(|| - { + ExtBuilder::default().term_duration(0).build_and_execute(|| { assert_eq!(::TermDuration::get(), 0); assert_eq!(::DesiredMembers::get(), 2); assert_eq!(Elections::election_rounds(), 0); @@ -1664,10 +1677,7 @@ mod tests { assert_eq!(candidate_ids(), Vec::::new()); assert_ok!(submit_candidacy(Origin::signed(1))); assert_eq!(candidate_ids(), vec![1]); - assert_noop!( - submit_candidacy(Origin::signed(1)), - Error::::DuplicatedCandidate, - ); + assert_noop!(submit_candidacy(Origin::signed(1)), Error::::DuplicatedCandidate,); }); } @@ -1685,10 +1695,7 @@ mod tests { assert!(Elections::runners_up().is_empty()); assert!(candidate_ids().is_empty()); - assert_noop!( - submit_candidacy(Origin::signed(5)), - Error::::MemberSubmit, - ); + assert_noop!(submit_candidacy(Origin::signed(5)), Error::::MemberSubmit,); }); } @@ -1708,10 +1715,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(runners_up_ids(), vec![3]); - assert_noop!( - submit_candidacy(Origin::signed(3)), - Error::::RunnerUpSubmit, - ); + assert_noop!(submit_candidacy(Origin::signed(3)), Error::::RunnerUpSubmit,); }); } @@ -1846,10 +1850,7 @@ mod tests { #[test] fn cannot_vote_for_no_candidate() { ExtBuilder::default().build_and_execute(|| { - assert_noop!( - vote(Origin::signed(2), vec![], 20), - Error::::NoVotes, - ); + assert_noop!(vote(Origin::signed(2), vec![], 20), Error::::NoVotes,); }); } @@ -1934,10 +1935,7 @@ mod tests { assert_eq!(members_ids(), vec![4, 5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - assert_ok!(Elections::renounce_candidacy( - Origin::signed(4), - Renouncing::Member - )); + assert_ok!(Elections::renounce_candidacy(Origin::signed(4), Renouncing::Member)); assert_eq!(members_ids(), vec![5]); assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); @@ -1970,35 +1968,34 @@ mod tests { ExtBuilder::default() .desired_runners_up(1) .balance_factor(10) - .build_and_execute( - || { - // when we have only candidates - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + .build_and_execute(|| { + // when we have only candidates + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_noop!( - // content of the vote is irrelevant. - vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), - Error::::TooManyVotes, - ); + assert_noop!( + // content of the vote is irrelevant. 
+ vote(Origin::signed(1), vec![9, 99, 999, 9999], 5), + Error::::TooManyVotes, + ); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - // now we have 2 members, 1 runner-up, and 1 new candidate - assert_ok!(submit_candidacy(Origin::signed(2))); + // now we have 2 members, 1 runner-up, and 1 new candidate + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); - assert_noop!( - vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), - Error::::TooManyVotes, - ); - }); + assert_ok!(vote(Origin::signed(1), vec![9, 99, 999, 9999], 5)); + assert_noop!( + vote(Origin::signed(1), vec![9, 99, 999, 9_999, 99_999], 5), + Error::::TooManyVotes, + ); + }); } #[test] @@ -2007,10 +2004,7 @@ mod tests { assert_ok!(submit_candidacy(Origin::signed(5))); assert_ok!(submit_candidacy(Origin::signed(4))); - assert_noop!( - vote(Origin::signed(2), vec![4], 1), - Error::::LowBalance, - ); + assert_noop!(vote(Origin::signed(2), vec![4], 1), Error::::LowBalance,); }) } @@ -2151,7 +2145,10 @@ mod tests { System::set_block_number(5); Elections::on_initialize(System::block_number()); - System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); + System::assert_last_event(Event::Elections(super::Event::NewTerm(vec![ + (4, 40), + (5, 50), + ]))); assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); assert_eq!(runners_up_and_stake(), vec![]); @@ -2480,10 +2477,7 @@ mod tests { let unwrapped_error = Elections::remove_member(Origin::root(), 4, true).unwrap_err(); assert!(matches!( unwrapped_error.error, - DispatchError::Module { - message: Some("InvalidReplacement"), - .. - } + DispatchError::Module { message: Some("InvalidReplacement"), .. } )); assert!(unwrapped_error.post_info.actual_weight.is_some()); }); @@ -2506,10 +2500,7 @@ mod tests { let unwrapped_error = Elections::remove_member(Origin::root(), 4, false).unwrap_err(); assert!(matches!( unwrapped_error.error, - DispatchError::Module { - message: Some("InvalidReplacement"), - .. - } + DispatchError::Module { message: Some("InvalidReplacement"), .. } )); assert!(unwrapped_error.post_info.actual_weight.is_some()); }); @@ -2590,7 +2581,10 @@ mod tests { // 5 is an outgoing loser. will also get slashed. 
assert_eq!(balances(&5), (45, 2)); - System::assert_has_event(Event::Elections(super::Event::NewTerm(vec![(4, 40), (5, 50)]))); + System::assert_has_event(Event::Elections(super::Event::NewTerm(vec![ + (4, 40), + (5, 50), + ]))); }) } @@ -2636,24 +2630,22 @@ mod tests { #[test] fn runner_up_replacement_maintains_members_order() { - ExtBuilder::default() - .desired_runners_up(2) - .build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); + ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); assert_ok!(submit_candidacy(Origin::signed(2))); assert_ok!(vote(Origin::signed(2), vec![5], 20)); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(5), vec![2], 50)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::root(), 2, true)); - assert_eq!(members_ids(), vec![4, 5]); - }); + assert_eq!(members_ids(), vec![2, 4]); + assert_ok!(Elections::remove_member(Origin::root(), 2, true)); + assert_eq!(members_ids(), vec![4, 5]); + }); } #[test] @@ -2709,12 +2701,10 @@ mod tests { #[test] fn can_renounce_candidacy_runner_up() { - ExtBuilder::default() - .desired_runners_up(2) - .build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); assert_ok!(submit_candidacy(Origin::signed(2))); assert_ok!(vote(Origin::signed(5), vec![4], 50)); @@ -2722,21 +2712,18 @@ mod tests { assert_ok!(vote(Origin::signed(3), vec![3], 30)); assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2, 3]); - assert_ok!(Elections::renounce_candidacy( - Origin::signed(3), - Renouncing::RunnerUp - )); - assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. + assert_ok!(Elections::renounce_candidacy(Origin::signed(3), Renouncing::RunnerUp)); + assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. 
- assert_eq!(members_ids(), vec![4, 5]); - assert_eq!(runners_up_ids(), vec![2]); - }) + assert_eq!(members_ids(), vec![4, 5]); + assert_eq!(runners_up_ids(), vec![2]); + }) } #[test] @@ -2871,117 +2858,124 @@ mod tests { #[test] fn unsorted_runners_up_are_detected() { - ExtBuilder::default().desired_runners_up(2).desired_members(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); - assert_ok!(vote(Origin::signed(4), vec![4], 5)); - assert_ok!(vote(Origin::signed(3), vec![3], 15)); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); + assert_ok!(vote(Origin::signed(4), vec![4], 5)); + assert_ok!(vote(Origin::signed(3), vec![3], 15)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![5]); - assert_eq!(runners_up_ids(), vec![4, 3]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![4, 3]); - assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(2), vec![2], 10)); + assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(vote(Origin::signed(2), vec![2], 10)); - System::set_block_number(10); - Elections::on_initialize(System::block_number()); + System::set_block_number(10); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![5]); - assert_eq!(runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![2, 3]); - // 4 is outgoing runner-up. Slash candidacy bond. - assert_eq!(balances(&4), (35, 2)); - // 3 stays. - assert_eq!(balances(&3), (25, 5)); - }) + // 4 is outgoing runner-up. Slash candidacy bond. + assert_eq!(balances(&4), (35, 2)); + // 3 stays. 
+ assert_eq!(balances(&3), (25, 5)); + }) } #[test] fn member_to_runner_up_wont_slash() { - ExtBuilder::default().desired_runners_up(2).desired_members(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); - + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4]); - assert_eq!(runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&3), (25, 5)); - assert_eq!(balances(&2), (15, 5)); + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&3), (25, 5)); + assert_eq!(balances(&2), (15, 5)); - // this guy will shift everyone down. - assert_ok!(submit_candidacy(Origin::signed(5))); - assert_ok!(vote(Origin::signed(5), vec![5], 50)); + // this guy will shift everyone down. + assert_ok!(submit_candidacy(Origin::signed(5))); + assert_ok!(vote(Origin::signed(5), vec![5], 50)); - System::set_block_number(10); - Elections::on_initialize(System::block_number()); + System::set_block_number(10); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![5]); - assert_eq!(runners_up_ids(), vec![3, 4]); + assert_eq!(members_ids(), vec![5]); + assert_eq!(runners_up_ids(), vec![3, 4]); - // 4 went from member to runner-up -- don't slash. - assert_eq!(balances(&4), (35, 5)); - // 3 stayed runner-up -- don't slash. - assert_eq!(balances(&3), (25, 5)); - // 2 was removed -- slash. - assert_eq!(balances(&2), (15, 2)); - }); + // 4 went from member to runner-up -- don't slash. + assert_eq!(balances(&4), (35, 5)); + // 3 stayed runner-up -- don't slash. + assert_eq!(balances(&3), (25, 5)); + // 2 was removed -- slash. 
+ assert_eq!(balances(&2), (15, 2)); + }); } #[test] fn runner_up_to_member_wont_slash() { - ExtBuilder::default().desired_runners_up(2).desired_members(1).build_and_execute(|| { - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); - + ExtBuilder::default() + .desired_runners_up(2) + .desired_members(1) + .build_and_execute(|| { + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![4]); - assert_eq!(runners_up_ids(), vec![2, 3]); + assert_eq!(members_ids(), vec![4]); + assert_eq!(runners_up_ids(), vec![2, 3]); - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&3), (25, 5)); - assert_eq!(balances(&2), (15, 5)); + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&3), (25, 5)); + assert_eq!(balances(&2), (15, 5)); - // swap some votes. - assert_ok!(vote(Origin::signed(4), vec![2], 40)); - assert_ok!(vote(Origin::signed(2), vec![4], 20)); + // swap some votes. + assert_ok!(vote(Origin::signed(4), vec![2], 40)); + assert_ok!(vote(Origin::signed(2), vec![4], 20)); - System::set_block_number(10); - Elections::on_initialize(System::block_number()); + System::set_block_number(10); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![2]); - assert_eq!(runners_up_ids(), vec![4, 3]); + assert_eq!(members_ids(), vec![2]); + assert_eq!(runners_up_ids(), vec![4, 3]); - // 2 went from runner to member, don't slash - assert_eq!(balances(&2), (15, 5)); - // 4 went from member to runner, don't slash - assert_eq!(balances(&4), (35, 5)); - // 3 stayed the same - assert_eq!(balances(&3), (25, 5)); - }); + // 2 went from runner to member, don't slash + assert_eq!(balances(&2), (15, 5)); + // 4 went from member to runner, don't slash + assert_eq!(balances(&4), (35, 5)); + // 3 stayed the same + assert_eq!(balances(&3), (25, 5)); + }); } #[test] @@ -3031,14 +3025,17 @@ mod tests { #[test] fn no_desired_members() { // not interested in anything - ExtBuilder::default().desired_members(0).desired_runners_up(0).build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + ExtBuilder::default() + .desired_members(0) + .desired_runners_up(0) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Elections::candidates().len(), 3); assert_ok!(vote(Origin::signed(4), vec![4], 40)); assert_ok!(vote(Origin::signed(3), vec![3], 30)); @@ -3048,56 +3045,62 @@ mod tests { Elections::on_initialize(System::block_number()); assert_eq!(members_ids().len(), 0); - assert_eq!(runners_up_ids().len(), 
0); - assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); - }); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); // not interested in members - ExtBuilder::default().desired_members(0).desired_runners_up(2).build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + ExtBuilder::default() + .desired_members(0) + .desired_runners_up(2) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids().len(), 0); - assert_eq!(runners_up_ids(), vec![3, 4]); - assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); - }); + assert_eq!(members_ids().len(), 0); + assert_eq!(runners_up_ids(), vec![3, 4]); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); // not interested in runners-up - ExtBuilder::default().desired_members(2).desired_runners_up(0).build_and_execute(|| { - assert_eq!(Elections::candidates().len(), 0); + ExtBuilder::default() + .desired_members(2) + .desired_runners_up(0) + .build_and_execute(|| { + assert_eq!(Elections::candidates().len(), 0); - assert_ok!(submit_candidacy(Origin::signed(4))); - assert_ok!(submit_candidacy(Origin::signed(3))); - assert_ok!(submit_candidacy(Origin::signed(2))); + assert_ok!(submit_candidacy(Origin::signed(4))); + assert_ok!(submit_candidacy(Origin::signed(3))); + assert_ok!(submit_candidacy(Origin::signed(2))); - assert_eq!(Elections::candidates().len(), 3); + assert_eq!(Elections::candidates().len(), 3); - assert_ok!(vote(Origin::signed(4), vec![4], 40)); - assert_ok!(vote(Origin::signed(3), vec![3], 30)); - assert_ok!(vote(Origin::signed(2), vec![2], 20)); + assert_ok!(vote(Origin::signed(4), vec![4], 40)); + assert_ok!(vote(Origin::signed(3), vec![3], 30)); + assert_ok!(vote(Origin::signed(2), vec![2], 20)); - System::set_block_number(5); - Elections::on_initialize(System::block_number()); + System::set_block_number(5); + Elections::on_initialize(System::block_number()); - assert_eq!(members_ids(), vec![3, 4]); - assert_eq!(runners_up_ids().len(), 0); - assert_eq!(all_voters().len(), 3); - assert_eq!(Elections::candidates().len(), 0); - }); + assert_eq!(members_ids(), vec![3, 4]); + assert_eq!(runners_up_ids().len(), 0); + assert_eq!(all_voters().len(), 3); + assert_eq!(Elections::candidates().len(), 0); + }); } #[test] diff --git a/substrate/frame/elections-phragmen/src/migrations/v3.rs b/substrate/frame/elections-phragmen/src/migrations/v3.rs index 8afc9ed66920b352bf8dbd99c686f3d6a45f0ffb..b19146a9e28e5fc7767bc437c5be5344a7ba7f1d 100644 --- 
a/substrate/frame/elections-phragmen/src/migrations/v3.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v3.rs @@ -17,12 +17,13 @@ //! Migrations to version [`3.0.0`], as denoted by the changelog. -use codec::{Encode, Decode, FullCodec}; -use sp_std::prelude::*; +use codec::{Decode, Encode, FullCodec}; use frame_support::{ - RuntimeDebug, weights::Weight, Twox64Concat, traits::{GetPalletVersion, PalletVersion}, + traits::{GetPalletVersion, PalletVersion}, + weights::Weight, + RuntimeDebug, Twox64Concat, }; +use sp_std::prelude::*; #[derive(Encode, Decode, Clone, Default, RuntimeDebug, PartialEq)] struct SeatHolder<AccountId, Balance> { @@ -89,7 +90,7 @@ pub fn apply<T: V2ToV3>(old_voter_bond: T::Balance, old_candidacy_bond: T::Balan migrate_runners_up_to_recorded_deposit::<T>(old_candidacy_bond); migrate_members_to_recorded_deposit::<T>(old_candidacy_bond); Weight::max_value() - } + }, _ => { log::warn!( target: "runtime::elections-phragmen", @@ -103,15 +104,9 @@ pub fn apply<T: V2ToV3>(old_voter_bond: T::Balance, old_candidacy_bond: T::Balan /// Migrate from the old legacy voting bond (fixed) to the new one (per-vote dynamic). pub fn migrate_voters_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) { - <Voting<T>>::translate::<(T::Balance, Vec<T::AccountId>), _>( - |_who, (stake, votes)| { - Some(Voter { - votes, - stake, - deposit: old_deposit, - }) - }, - ); + <Voting<T>>::translate::<(T::Balance, Vec<T::AccountId>), _>(|_who, (stake, votes)| { + Some(Voter { votes, stake, deposit: old_deposit }) + }); log::info!( target: "runtime::elections-phragmen", @@ -122,50 +117,39 @@ pub fn migrate_voters_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) { /// Migrate all candidates to recorded deposit. pub fn migrate_candidates_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) { - let _ = <Candidates<T>>::translate::<Vec<T::AccountId>, _>( - |maybe_old_candidates| { - maybe_old_candidates.map(|old_candidates| { - log::info!( - target: "runtime::elections-phragmen", - "migrated {} candidate accounts.", - old_candidates.len(), - ); - old_candidates - .into_iter() - .map(|c| (c, old_deposit)) - .collect::<Vec<_>>() - }) - }, - ); + let _ = <Candidates<T>>::translate::<Vec<T::AccountId>, _>(|maybe_old_candidates| { + maybe_old_candidates.map(|old_candidates| { + log::info!( + target: "runtime::elections-phragmen", + "migrated {} candidate accounts.", + old_candidates.len(), + ); + old_candidates.into_iter().map(|c| (c, old_deposit)).collect::<Vec<_>>() + }) + }); } /// Migrate all members to recorded deposit. pub fn migrate_members_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) { - let _ = <Members<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>( - |maybe_old_members| { - maybe_old_members.map(|old_members| { - log::info!( - target: "runtime::elections-phragmen", - "migrated {} member accounts.", - old_members.len(), - ); - old_members - .into_iter() - .map(|(who, stake)| SeatHolder { - who, - stake, - deposit: old_deposit, - }) - .collect::<Vec<_>>() - }) - }, - ); + let _ = <Members<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>(|maybe_old_members| { + maybe_old_members.map(|old_members| { + log::info!( + target: "runtime::elections-phragmen", + "migrated {} member accounts.", + old_members.len(), + ); + old_members + .into_iter() + .map(|(who, stake)| SeatHolder { who, stake, deposit: old_deposit }) + .collect::<Vec<_>>() + }) + }); } /// Migrate all runners-up to recorded deposit.
pub fn migrate_runners_up_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance) { - let _ = <RunnersUp<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>( - |maybe_old_runners_up| { + let _ = + <RunnersUp<T>>::translate::<Vec<(T::AccountId, T::Balance)>, _>(|maybe_old_runners_up| { maybe_old_runners_up.map(|old_runners_up| { log::info!( target: "runtime::elections-phragmen", @@ -174,13 +158,8 @@ pub fn migrate_runners_up_to_recorded_deposit<T: V2ToV3>(old_deposit: T::Balance ); old_runners_up .into_iter() - .map(|(who, stake)| SeatHolder { - who, - stake, - deposit: old_deposit, - }) + .map(|(who, stake)| SeatHolder { who, stake, deposit: old_deposit }) .collect::<Vec<_>>() }) - }, - ); + }); }
diff --git a/substrate/frame/elections-phragmen/src/migrations/v4.rs b/substrate/frame/elections-phragmen/src/migrations/v4.rs index f704b203d34cf75e4a02a6a6aff2ec03e7d409d4..fde9a768f335ecf671fa9076eff04bb73b9585a8 100644 --- a/substrate/frame/elections-phragmen/src/migrations/v4.rs +++ b/substrate/frame/elections-phragmen/src/migrations/v4.rs @@ -18,8 +18,8 @@ //! Migrations to version [`4.0.0`], as denoted by the changelog. use frame_support::{ + traits::{Get, GetPalletVersion, PalletVersion}, weights::Weight, - traits::{GetPalletVersion, PalletVersion, Get}, }; /// The old prefix. @@ -32,17 +32,15 @@ pub const OLD_PREFIX: &[u8] = b"PhragmenElection"; /// `<Runtime as frame_system::Config>::PalletInfo::name::<ElectionsPhragmenPallet>`. /// /// The old storage prefix, `PhragmenElection` is hardcoded in the migration code. -pub fn migrate< - T: frame_system::Config, - P: GetPalletVersion, - N: AsRef<str>, ->(new_pallet_name: N) -> Weight { +pub fn migrate<T: frame_system::Config, P: GetPalletVersion, N: AsRef<str>>( + new_pallet_name: N, +) -> Weight { if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX { log::info!( target: "runtime::elections-phragmen", "New pallet name is equal to the old prefix. No migration needs to be done.", ); - return 0; + return 0 } let maybe_storage_version = <P as GetPalletVersion>::storage_version(); log::info!( @@ -59,7 +57,7 @@ pub fn migrate< new_pallet_name.as_ref().as_bytes(), ); <T as frame_system::Config>::BlockWeights::get().max_block - } + }, _ => { log::warn!( target: "runtime::elections-phragmen", @@ -103,7 +101,7 @@ pub fn pre_migration<P: GetPalletVersion, N: AsRef<str>>(new: N) { /// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. /// /// Panics if anything goes wrong. -pub fn post_migration<P: GetPalletVersion>() { +pub fn post_migration<P: GetPalletVersion>() { log::info!("post-migration elections-phragmen"); // ensure we've been updated to v4 by the automatic write of crate version -> storage version. assert!(<P as GetPalletVersion>::storage_version().unwrap().major == 4);
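An illustrative sketch of how a runtime might invoke this prefix migration (outside this diff; `Runtime` and the "Elections" instance name are assumed placeholders, and `Pallet<Runtime>` is assumed to supply `GetPalletVersion` here):

pub struct MoveElectionsPhragmenPrefix;
impl frame_support::traits::OnRuntimeUpgrade for MoveElectionsPhragmenPrefix {
    fn on_runtime_upgrade() -> frame_support::weights::Weight {
        // Moves every storage item from the hardcoded `PhragmenElection` prefix to
        // the new pallet name, guarded by the stored pallet version as shown above.
        pallet_elections_phragmen::migrations::v4::migrate::<
            Runtime,
            pallet_elections_phragmen::Pallet<Runtime>,
            _,
        >("Elections")
    }
}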
diff --git a/substrate/frame/elections-phragmen/src/weights.rs b/substrate/frame/elections-phragmen/src/weights.rs index 12a3a433401bb44dd8d8abbef82cbec90191077f..ce558fb9d7f06b1b27b9124613e9b2873f182ce4 100644 --- a/substrate/frame/elections-phragmen/src/weights.rs +++ b/substrate/frame/elections-phragmen/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)]
diff --git a/substrate/frame/elections/src/lib.rs b/substrate/frame/elections/src/lib.rs index 54bdb1f90dde64bafddfc035d18576e47ac08057..e51733a79db90eb646409a3584993dce7a2b74a6 100644 --- a/substrate/frame/elections/src/lib.rs +++ b/substrate/frame/elections/src/lib.rs @@ -29,24 +29,26 @@ //! whose voting is serially unsuccessful. #![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_std::prelude::*; -use sp_runtime::{ - RuntimeDebug, print, - traits::{Zero, One, StaticLookup, Saturating}, -}; +use codec::{Decode, Encode}; use frame_support::{ - pallet_prelude::*, ensure, - weights::{Weight, DispatchClass}, + ensure, + pallet_prelude::*, traits::{ - Currency, ExistenceRequirement, LockableCurrency, LockIdentifier, BalanceStatus, - OnUnbalanced, ReservableCurrency, WithdrawReasons, ChangeMembers, - } + BalanceStatus, ChangeMembers, Currency, ExistenceRequirement, LockIdentifier, + LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReasons, + }, + weights::{DispatchClass, Weight}, }; -use codec::{Encode, Decode}; use frame_system::pallet_prelude::*; pub use pallet::*; +use sp_runtime::{ + print, + traits::{One, Saturating, StaticLookup, Zero}, + RuntimeDebug, +}; +use sp_std::prelude::*; mod mock; mod tests; @@ -140,9 +142,11 @@ pub const VOTER_SET_SIZE: usize = 64; /// NUmber of approvals grouped in one chunk. pub const APPROVAL_SET_SIZE: usize = 8; -type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; -type NegativeImbalanceOf<T> = - <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance; +type BalanceOf<T> = + <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; +type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency< + <T as frame_system::Config>::AccountId, +>>::NegativeImbalance; /// Index used to access chunks. type SetIndex = u32; @@ -170,8 +174,7 @@ pub mod pallet { type PalletId: Get<LockIdentifier>; /// The currency that people are electing with. - type Currency: - LockableCurrency<Self::AccountId, Moment=Self::BlockNumber> + type Currency: LockableCurrency<Self::AccountId, Moment = Self::BlockNumber> + ReservableCurrency<Self::AccountId>; /// Handler for the unbalanced reduction when slashing a validator. @@ -239,14 +242,14 @@ pub mod pallet { #[pallet::extra_constants] impl<T: Config> Pallet<T> { - //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. /// The chunk size of the voter vector. #[allow(non_snake_case)] fn VOTER_SET_SIZE() -> u32 { VOTER_SET_SIZE as u32 } - //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. /// The chunk size of the approval vector. #[allow(non_snake_case)] fn APPROVAL_SET_SIZE() -> u32 { @@ -292,17 +295,12 @@ pub mod pallet { // bit-wise manner. In order to get a human-readable representation (`Vec<bool>`), use // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of // `APPROVAL_SET_SIZE`.
- /// /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash and `SetIndex` is not /// attacker-controlled. #[pallet::storage] #[pallet::getter(fn approvals_of)] - pub type ApprovalsOf = StorageMap< - _, - Twox64Concat, (T::AccountId, SetIndex), - Vec, - ValueQuery, - >; + pub type ApprovalsOf = + StorageMap<_, Twox64Concat, (T::AccountId, SetIndex), Vec, ValueQuery>; /// The vote index and list slot that the candidate `who` was registered or `None` if they /// are not currently registered. @@ -310,26 +308,24 @@ pub mod pallet { /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. #[pallet::storage] #[pallet::getter(fn candidate_reg_info)] - pub type RegisterInfoOf = StorageMap<_, Twox64Concat, T::AccountId, (VoteIndex, u32)>; + pub type RegisterInfoOf = + StorageMap<_, Twox64Concat, T::AccountId, (VoteIndex, u32)>; /// Basic information about a voter. /// /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. #[pallet::storage] #[pallet::getter(fn voter_info)] - pub type VoterInfoOf = StorageMap<_, Twox64Concat, T::AccountId, VoterInfo>>; + pub type VoterInfoOf = + StorageMap<_, Twox64Concat, T::AccountId, VoterInfo>>; /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). /// /// TWOX-NOTE: OKAY ― `SetIndex` is not user-controlled data. #[pallet::storage] #[pallet::getter(fn voters)] - pub type Voters = StorageMap< - _, - Twox64Concat, SetIndex, - Vec>, - ValueQuery, - >; + pub type Voters = + StorageMap<_, Twox64Concat, SetIndex, Vec>, ValueQuery>; /// the next free set to store a voter in. This will keep growing. #[pallet::storage] @@ -559,7 +555,8 @@ pub mod pallet { let reporter_index = reporter_index as usize; let who_index = who_index as usize; - let assumed_reporter = Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; + let assumed_reporter = + Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; let assumed_who = Self::voter_at(who_index).ok_or(Error::::InvalidTargetIndex)?; ensure!(assumed_reporter == reporter, Error::::InvalidReporterIndex); @@ -567,29 +564,31 @@ pub mod pallet { // will definitely kill one of reporter or who now. - let valid = !Self::all_approvals_of(&who).iter() - .zip(Self::candidates().iter()) - .any(|(&appr, addr)| - appr && + let valid = !Self::all_approvals_of(&who).iter().zip(Self::candidates().iter()).any( + |(&appr, addr)| { + appr && *addr != T::AccountId::default() && // defensive only: all items in candidates list are registered Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active) - ); + }, + ); Self::remove_voter( if valid { &who } else { &reporter }, - if valid { who_index } else { reporter_index } + if valid { who_index } else { reporter_index }, ); - T::Currency::remove_lock( - T::PalletId::get(), - if valid { &who } else { &reporter } - ); + T::Currency::remove_lock(T::PalletId::get(), if valid { &who } else { &reporter }); if valid { // This only fails if `reporter` doesn't exist, which it clearly must do since its // the origin. Still, it's no more harmful to propagate any error at this point. - T::Currency::repatriate_reserved(&who, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; + T::Currency::repatriate_reserved( + &who, + &reporter, + T::VotingBond::get(), + BalanceStatus::Free, + )?; Self::deposit_event(Event::::VoterReaped(who, reporter)); } else { let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; @@ -614,7 +613,10 @@ pub mod pallet { /// - Two fewer DB entries, one DB change. 
/// # #[pallet::weight(1_250_000_000)] - pub fn retract_voter(origin: OriginFor, #[pallet::compact] index: u32) -> DispatchResult { + pub fn retract_voter( + origin: OriginFor, + #[pallet::compact] index: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::presentation_active(), Error::::CannotRetractPresenting); @@ -644,7 +646,10 @@ pub mod pallet { /// - Three DB changes. /// # #[pallet::weight(2_500_000_000)] - pub fn submit_candidacy(origin: OriginFor, #[pallet::compact] slot: u32) -> DispatchResult { + pub fn submit_candidacy( + origin: OriginFor, + #[pallet::compact] slot: u32, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!Self::is_a_candidate(&who), Error::::DuplicatedCandidate); @@ -689,38 +694,31 @@ pub mod pallet { #[pallet::compact] index: VoteIndex, ) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!( - !total.is_zero(), - Error::::ZeroDeposit, - ); + ensure!(!total.is_zero(), Error::::ZeroDeposit,); let candidate = T::Lookup::lookup(candidate)?; ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - let (_, _, expiring) = Self::next_finalize() - .ok_or(Error::::NotPresentationPeriod)?; + let (_, _, expiring) = + Self::next_finalize().ok_or(Error::::NotPresentationPeriod)?; let bad_presentation_punishment = - T::PresentSlashPerVoter::get() - * BalanceOf::::from(Self::voter_count() as u32); + T::PresentSlashPerVoter::get() * BalanceOf::::from(Self::voter_count() as u32); ensure!( T::Currency::can_slash(&who, bad_presentation_punishment), Error::::InsufficientPresenterFunds, ); - let mut leaderboard = Self::leaderboard() - .ok_or(Error::::LeaderboardMustExist)?; + let mut leaderboard = Self::leaderboard().ok_or(Error::::LeaderboardMustExist)?; ensure!(total > leaderboard[0].0, Error::::UnworthyCandidate); if let Some(p) = Self::members().iter().position(|&(ref c, _)| c == &candidate) { - ensure!( - p < expiring.len(), - Error::::DuplicatedCandidate, - ); + ensure!(p < expiring.len(), Error::::DuplicatedCandidate,); } let voters = Self::all_voters(); let (registered_since, candidate_index): (VoteIndex, u32) = Self::candidate_reg_info(&candidate).ok_or(Error::::InvalidCandidate)?; - let actual_total = voters.iter() + let actual_total = voters + .iter() .filter_map(|maybe_voter| maybe_voter.as_ref()) .filter_map(|voter| match Self::voter_info(voter) { Some(b) if b.last_active >= registered_since => { @@ -731,7 +729,9 @@ pub mod pallet { let weight = stake + offset + b.pot; if Self::approvals_of_at(voter, candidate_index as usize) { Some(weight) - } else { None } + } else { + None + } }, _ => None, }) @@ -748,7 +748,11 @@ pub mod pallet { // better safe than sorry. let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; T::BadPresentation::on_unbalanced(imbalance); - Err(if dupe { Error::::DuplicatedPresentation } else { Error::::IncorrectTotal })? + Err(if dupe { + Error::::DuplicatedPresentation + } else { + Error::::IncorrectTotal + })? } } @@ -756,7 +760,10 @@ pub mod pallet { /// election when they expire. If more, then a new vote will be started if one is not /// already in progress. 
#[pallet::weight((0, DispatchClass::Operational))] - pub fn set_desired_seats(origin: OriginFor, #[pallet::compact] count: u32) -> DispatchResult { + pub fn set_desired_seats( + origin: OriginFor, + #[pallet::compact] count: u32, + ) -> DispatchResult { ensure_root(origin)?; DesiredSeats::::put(count); Ok(()) @@ -767,13 +774,14 @@ pub mod pallet { /// Note: A tally should happen instantly (if not already in a presentation /// period) to fill the seat if removal means that the desired members are not met. #[pallet::weight((0, DispatchClass::Operational))] - pub fn remove_member(origin: OriginFor, who: ::Source) -> DispatchResult { + pub fn remove_member( + origin: OriginFor, + who: ::Source, + ) -> DispatchResult { ensure_root(origin)?; let who = T::Lookup::lookup(who)?; - let new_set: Vec<(T::AccountId, T::BlockNumber)> = Self::members() - .into_iter() - .filter(|i| i.0 != who) - .collect(); + let new_set: Vec<(T::AccountId, T::BlockNumber)> = + Self::members().into_iter().filter(|i| i.0 != who).collect(); >::put(&new_set); let new_set = new_set.into_iter().map(|x| x.0).collect::>(); T::ChangeMembers::change_members(&[], &[who], new_set); @@ -821,7 +829,8 @@ impl Pallet { /// Iff the member `who` still has a seat at blocknumber `n` returns `true`. pub fn will_still_be_member_at(who: &T::AccountId, n: T::BlockNumber) -> bool { - Self::members().iter() + Self::members() + .iter() .find(|&&(ref a, _)| a == who) .map(|&(_, expires)| expires > n) .unwrap_or(false) @@ -859,7 +868,8 @@ impl Pallet { } else { Some(c[c.len() - (desired_seats - coming) as usize].1) } - }.map(Self::next_vote_from) + } + .map(Self::next_vote_from) } } @@ -906,18 +916,12 @@ impl Pallet { ensure!(!Self::presentation_active(), Error::::ApprovalPresentation); ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - ensure!( - !candidates_len.is_zero(), - Error::::ZeroCandidates, - ); + ensure!(!candidates_len.is_zero(), Error::::ZeroCandidates,); // Prevent a vote from voters that provide a list of votes that exceeds the candidates // length since otherwise an attacker may be able to submit a very long list of `votes` that // far exceeds the amount of candidates and waste more computation than a reasonable voting // bond would cover. - ensure!( - candidates_len >= votes.len(), - Error::::TooManyVotes, - ); + ensure!(candidates_len >= votes.len(), Error::::TooManyVotes,); ensure!(value >= T::MinimumVotingLock::get(), Error::::InsufficientLockedValue); // Amount to be locked up. 
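An illustrative sketch of the voter-chunk arithmetic used throughout this pallet (outside this diff): voters live in fixed-size sets of `VOTER_SET_SIZE` (64) keyed by a `SetIndex`, so a global voter index splits into a chunk index and an offset within that chunk:

const VOTER_SET_SIZE: usize = 64;

// Map a global voter index to (set index, position inside the set).
fn locate(global_index: usize) -> (u32, usize) {
    ((global_index / VOTER_SET_SIZE) as u32, global_index % VOTER_SET_SIZE)
}

fn main() {
    assert_eq!(locate(0), (0, 0)); // first slot of the first set
    assert_eq!(locate(63), (0, 63)); // last slot of the first set
    assert_eq!(locate(64), (1, 0)); // first slot of the second set
}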
@@ -969,19 +973,14 @@ impl Pallet { NextVoterSet::::put(next + 1); } >::append(next, Some(who.clone())); - } + }, } T::Currency::reserve(&who, T::VotingBond::get())?; VoterCount::::mutate(|c| *c = *c + 1); } - T::Currency::set_lock( - T::PalletId::get(), - &who, - locked_balance, - WithdrawReasons::all(), - ); + T::Currency::set_lock(T::PalletId::get(), &who, locked_balance, WithdrawReasons::all()); >::insert( &who, @@ -990,7 +989,7 @@ impl Pallet { last_win: index, stake: locked_balance, pot: pot_to_set, - } + }, ); Self::set_approvals_chunked(&who, votes); @@ -1002,18 +1001,26 @@ impl Pallet { let members = Self::members(); let desired_seats = Self::desired_seats() as usize; let number = >::block_number(); - let expiring = - members.iter().take_while(|i| i.1 <= number).map(|i| i.0.clone()).collect::>(); + let expiring = members + .iter() + .take_while(|i| i.1 <= number) + .map(|i| i.0.clone()) + .collect::>(); let retaining_seats = members.len() - expiring.len(); if retaining_seats < desired_seats { let empty_seats = desired_seats - retaining_seats; - >::put( - (number + Self::presentation_duration(), empty_seats as u32, expiring) - ); + >::put(( + number + Self::presentation_duration(), + empty_seats as u32, + expiring, + )); // initialize leaderboard. let leaderboard_size = empty_seats + T::CarryCount::get() as usize; - >::put(vec![(BalanceOf::::zero(), T::AccountId::default()); leaderboard_size]); + >::put(vec![ + (BalanceOf::::zero(), T::AccountId::default()); + leaderboard_size + ]); Self::deposit_event(Event::::TallyStarted(empty_seats as u32)); } @@ -1027,19 +1034,22 @@ impl Pallet { let (_, coming, expiring): (T::BlockNumber, u32, Vec) = >::take() .ok_or("finalize can only be called after a tally is started.")?; - let leaderboard: Vec<(BalanceOf, T::AccountId)> = >::take() - .unwrap_or_default(); + let leaderboard: Vec<(BalanceOf, T::AccountId)> = + >::take().unwrap_or_default(); let new_expiry = >::block_number() + Self::term_duration(); // return bond to winners. let candidacy_bond = T::CandidacyBond::get(); - let incoming: Vec<_> = leaderboard.iter() + let incoming: Vec<_> = leaderboard + .iter() .rev() .take_while(|&&(b, _)| !b.is_zero()) .take(coming as usize) .map(|(_, a)| a) .cloned() - .inspect(|a| { T::Currency::unreserve(a, candidacy_bond); }) + .inspect(|a| { + T::Currency::unreserve(a, candidacy_bond); + }) .collect(); // Update last win index for anyone voted for any of the incomings. @@ -1049,14 +1059,16 @@ impl Pallet { .iter() .filter_map(|mv| mv.as_ref()) .filter(|v| Self::approvals_of_at(*v, index)) - .for_each(|v| >::mutate(v, |a| { - if let Some(activity) = a { activity.last_win = Self::vote_index() + 1; } - })); + .for_each(|v| { + >::mutate(v, |a| { + if let Some(activity) = a { + activity.last_win = Self::vote_index() + 1; + } + }) + }); }); let members = Self::members(); - let outgoing: Vec<_> = members.iter() - .take(expiring.len()) - .map(|a| a.0.clone()).collect(); + let outgoing: Vec<_> = members.iter().take(expiring.len()).map(|a| a.0.clone()).collect(); // set the new membership set. let mut new_set: Vec<_> = members @@ -1072,8 +1084,9 @@ impl Pallet { // clear all except runners-up from candidate list. let candidates = Self::candidates(); - let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. - let runners_up = leaderboard.into_iter() + let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. 
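An illustrative sketch of the leaderboard selection pattern used here (outside this diff): the leaderboard is kept sorted ascending by backing balance, with zero-backed placeholder slots, so winners are read from the back, mirroring the `rev`/`take_while`/`take` chain above; the `skip` variant just below yields the runners-up. Toy values:

fn main() {
    // (backing balance, candidate), sorted ascending; zero marks an unfilled slot.
    let leaderboard = vec![(0u64, "unfilled"), (10, "carol"), (20, "bob"), (30, "alice")];
    let coming = 2; // empty seats to fill in this tally

    let incoming: Vec<_> = leaderboard
        .iter()
        .rev() // highest backing first
        .take_while(|&&(backing, _)| backing != 0) // stop at unfilled slots
        .take(coming)
        .map(|&(_, who)| who)
        .collect();
    assert_eq!(incoming, ["alice", "bob"]);

    // Non-zero entries beyond the winners stay on as runners-up.
    let runners_up: Vec<_> = leaderboard
        .iter()
        .rev()
        .take_while(|&&(backing, _)| backing != 0)
        .skip(coming)
        .map(|&(_, who)| who)
        .collect();
    assert_eq!(runners_up, ["carol"]);
}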
+ let runners_up = leaderboard + .into_iter() .rev() .take_while(|&(b, _)| !b.is_zero()) .skip(coming as usize) @@ -1098,11 +1111,10 @@ impl Pallet { } } // discard any superfluous slots. - if let Some(last_index) = new_candidates - .iter() - .rposition(|c| *c != T::AccountId::default()) { - new_candidates.truncate(last_index + 1); - } + if let Some(last_index) = new_candidates.iter().rposition(|c| *c != T::AccountId::default()) + { + new_candidates.truncate(last_index + 1); + } Self::deposit_event(Event::::TallyFinalized(incoming, outgoing)); @@ -1131,7 +1143,7 @@ impl Pallet { loop { let next_set = >::get(index); if next_set.is_empty() { - break; + break } else { index += 1; all.extend(next_set); @@ -1177,9 +1189,7 @@ impl Pallet { approvals_flag_vec .chunks(APPROVAL_SET_SIZE) .enumerate() - .for_each(|(index, slice)| >::insert( - (&who, index as SetIndex), slice) - ); + .for_each(|(index, slice)| >::insert((&who, index as SetIndex), slice)); } /// shorthand for fetching a specific approval of a voter at a specific (global) index. @@ -1204,7 +1214,7 @@ impl Pallet { /// Return true of the bit `n` of scalar `x` is set to `1` and false otherwise. fn bit_at(x: ApprovalFlag, n: usize) -> bool { if n < APPROVAL_FLAG_LEN { - x & ( 1 << n ) != 0 + x & (1 << n) != 0 } else { false } @@ -1215,7 +1225,7 @@ impl Pallet { pub fn bool_to_flag(x: Vec) -> Vec { let mut result: Vec = Vec::with_capacity(x.len() / APPROVAL_FLAG_LEN); if x.is_empty() { - return result; + return result } result.push(0); let mut index = 0; @@ -1224,7 +1234,9 @@ impl Pallet { let shl_index = counter % APPROVAL_FLAG_LEN; result[index] += (if x[counter] { 1 } else { 0 }) << shl_index; counter += 1; - if counter > x.len() - 1 { break; } + if counter > x.len() - 1 { + break + } if counter % APPROVAL_FLAG_LEN == 0 { result.push(0); index += 1; @@ -1236,15 +1248,18 @@ impl Pallet { /// Convert a vec of flags (u32) to boolean. pub fn flag_to_bool(chunk: Vec) -> Vec { let mut result = Vec::with_capacity(chunk.len()); - if chunk.is_empty() { return vec![] } - chunk.into_iter() - .map(|num| + if chunk.is_empty() { + return vec![] + } + chunk + .into_iter() + .map(|num| { (0..APPROVAL_FLAG_LEN).map(|bit| Self::bit_at(num, bit)).collect::>() - ) + }) .for_each(|c| { let last_approve = match c.iter().rposition(|n| *n) { Some(index) => index + 1, - None => 0 + None => 0, }; result.extend(c.into_iter().take(last_approve)); }); @@ -1258,7 +1273,9 @@ impl Pallet { let mut index = 0_u32; loop { let chunk = Self::approvals_of((who.clone(), index)); - if chunk.is_empty() { break; } + if chunk.is_empty() { + break + } all.extend(Self::flag_to_bool(chunk)); index += 1; } @@ -1291,7 +1308,9 @@ impl Pallet { /// returned if `t` is zero. 
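An illustrative sketch of the decay computed by `get_offset` just below (outside this diff): with decay ratio `d`, the offset is `stake` times the sum over `i = 1..t` of `(d / (d + 1))^i`, which converges to `stake * d` for large `t`, hence the `t > 150` shortcut. A toy integer version with an assumed `d = 24`:

// Toy stand-in for `get_offset`, assuming decay ratio d = 24; integer division
// mirrors the runtime's balance arithmetic.
fn get_offset_toy(stake: u64, t: u32) -> u64 {
    let d = 24u64;
    if t > 150 {
        return stake * d; // the geometric series has effectively converged
    }
    let (mut offset, mut total) = (stake, 0u64);
    for _ in 0..t {
        offset = offset * d / (d + 1);
        total += offset;
    }
    total
}

fn main() {
    assert_eq!(get_offset_toy(100, 1), 96); // 100 * 24 / 25
    assert_eq!(get_offset_toy(100, 2), 188); // 96 + (96 * 24 / 25 = 92)
    assert_eq!(get_offset_toy(100, 151), 2400); // converged: stake * d
}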
fn get_offset(stake: BalanceOf, t: VoteIndex) -> BalanceOf { let decay_ratio: BalanceOf = T::DecayRatio::get().into(); - if t > 150 { return stake * decay_ratio } + if t > 150 { + return stake * decay_ratio + } let mut offset = stake; let mut r = Zero::zero(); let decay = decay_ratio + One::one(); diff --git a/substrate/frame/elections/src/mock.rs b/substrate/frame/elections/src/mock.rs index 4df6da829a18eb69a9707cb3520b2bc3c3543844..78982f7af39884e4532c8c4295bfe487c0c2a8b8 100644 --- a/substrate/frame/elections/src/mock.rs +++ b/substrate/frame/elections/src/mock.rs @@ -19,16 +19,17 @@ #![cfg(test)] +use crate as elections; use frame_support::{ - parameter_types, assert_ok, + assert_ok, parameter_types, traits::{ChangeMembers, Currency, LockIdentifier}, }; use sp_core::H256; use sp_runtime::{ - BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; -use crate as elections; - parameter_types! { pub const BlockHashCount: u64 = 250; @@ -104,7 +105,7 @@ impl ChangeMembers for TestChangeMembers { } } -parameter_types!{ +parameter_types! { pub const ElectionPalletId: LockIdentifier = *b"py/elect"; } @@ -197,56 +198,55 @@ impl ExtBuilder { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); let mut ext: sp_io::TestExternalities = GenesisConfig { - balances: pallet_balances::GenesisConfig::{ + balances: pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * self.balance_factor), (2, 20 * self.balance_factor), (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ], }, - elections: elections::GenesisConfig::{ + elections: elections::GenesisConfig:: { members: vec![], desired_seats: self.desired_seats, presentation_duration: 2, term_duration: 5, }, - }.build_storage().unwrap().into(); + } + .build_storage() + .unwrap() + .into(); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn voter_ids() -> Vec { - Elections::all_voters().iter().map(|v| v.unwrap_or(0) ).collect::>() + Elections::all_voters().iter().map(|v| v.unwrap_or(0)).collect::>() } pub(crate) fn vote(i: u64, l: usize) { let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - 0, - 20, - ) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + 0, + 20, + )); } pub(crate) fn vote_at(i: u64, l: usize, index: elections::VoteIndex) { let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - index, - 20, - ) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + index, + 20, + )); } pub(crate) fn create_candidate(i: u64, index: u32) { diff --git a/substrate/frame/elections/src/tests.rs b/substrate/frame/elections/src/tests.rs index 62e28eb6da0828c3bf0349d65e7e5502862d8942..0df84c6d79baf7279c07044e2ad0057de6a98fad 100644 --- a/substrate/frame/elections/src/tests.rs +++ b/substrate/frame/elections/src/tests.rs @@ -19,10 +19,9 @@ #![cfg(test)] -use crate::mock::*; -use crate::*; +use crate::{mock::*, *}; -use frame_support::{assert_ok, assert_err, assert_noop}; +use frame_support::{assert_err, assert_noop, assert_ok}; #[test] fn 
params_should_work() { @@ -60,38 +59,23 @@ fn chunking_bool_to_flag_should_work() { assert_eq!(Elections::bool_to_flag(vec![true, true, true, true, true]), vec![15 + 16]); let set_1 = vec![ - true, false, false, false, // 0x1 - false, true, true, true, // 0xE + true, false, false, false, // 0x1 + false, true, true, true, // 0xE ]; - assert_eq!( - Elections::bool_to_flag(set_1.clone()), - vec![0x00_00_00_E1_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), - set_1 - ); + assert_eq!(Elections::bool_to_flag(set_1.clone()), vec![0x00_00_00_E1_u32]); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), set_1); let set_2 = vec![ - false, false, false, true, // 0x8 - false, true, false, true, // 0xA + false, false, false, true, // 0x8 + false, true, false, true, // 0xA ]; - assert_eq!( - Elections::bool_to_flag(set_2.clone()), - vec![0x00_00_00_A8_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), - set_2 - ); + assert_eq!(Elections::bool_to_flag(set_2.clone()), vec![0x00_00_00_A8_u32]); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), set_2); - let mut rhs = (0..100/APPROVAL_FLAG_LEN).map(|_| 0xFFFFFFFF_u32).collect::>(); + let mut rhs = (0..100 / APPROVAL_FLAG_LEN).map(|_| 0xFFFFFFFF_u32).collect::>(); // NOTE: this might need to change based on `APPROVAL_FLAG_LEN`. rhs.extend(vec![0x00_00_00_0F]); - assert_eq!( - Elections::bool_to_flag((0..100).map(|_| true).collect()), - rhs - ) + assert_eq!(Elections::bool_to_flag((0..100).map(|_| true).collect()), rhs) }) } @@ -160,7 +144,7 @@ fn chunking_voter_set_reclaim_should_work() { fn chunking_approvals_set_growth_should_work() { ExtBuilder::default().build().execute_with(|| { // create candidates and voters. - (1..=250).for_each(|i| create_candidate(i, (i-1) as u32)); + (1..=250).for_each(|i| create_candidate(i, (i - 1) as u32)); (1..=250).for_each(|i| vote(i, i as usize)); // `all_approvals_of` should return the exact expected vector. @@ -168,26 +152,11 @@ fn chunking_approvals_set_growth_should_work() { Elections::all_approvals_of(&180), (0..180).map(|_| true).collect::>() ); - assert_eq!( - Elections::all_approvals_of(&32), - (0..32).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&8), - (0..8).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&64), - (0..64).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&65), - (0..65).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&63), - (0..63).map(|_| true).collect::>() - ); + assert_eq!(Elections::all_approvals_of(&32), (0..32).map(|_| true).collect::>()); + assert_eq!(Elections::all_approvals_of(&8), (0..8).map(|_| true).collect::>()); + assert_eq!(Elections::all_approvals_of(&64), (0..64).map(|_| true).collect::>()); + assert_eq!(Elections::all_approvals_of(&65), (0..65).map(|_| true).collect::>()); + assert_eq!(Elections::all_approvals_of(&63), (0..63).map(|_| true).collect::>()); // NOTE: assuming that APPROVAL_SET_SIZE is more or less small-ish. Might fail otherwise. let full_sets = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; @@ -197,10 +166,9 @@ fn chunking_approvals_set_growth_should_work() { // grab and check the last full set, if it exists.
if full_sets > 0 { assert_eq!( - Elections::approvals_of((180, (full_sets-1) as SetIndex )), + Elections::approvals_of((180, (full_sets - 1) as SetIndex)), Elections::bool_to_flag( - (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN) - .map(|_| true).collect::>() + (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN).map(|_| true).collect::>() ) ); } @@ -210,8 +178,7 @@ fn chunking_approvals_set_growth_should_work() { assert_eq!( Elections::approvals_of((180, full_sets as SetIndex)), Elections::bool_to_flag( - (0..left_over * APPROVAL_FLAG_LEN + rem) - .map(|_| true).collect::>() + (0..left_over * APPROVAL_FLAG_LEN + rem).map(|_| true).collect::>() ) ); } @@ -311,7 +278,7 @@ fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { assert_eq!(balances(&64), (18, 2)); assert_eq!( Elections::voter_info(&64).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 20, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 } ); assert_eq!(Elections::next_nonfull_voter_set(), 1); @@ -321,7 +288,7 @@ fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { assert_eq!(balances(&65), (13, 2)); assert_eq!( Elections::voter_info(&65).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 15, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 15, pot: 0 } ); }); } @@ -374,7 +341,7 @@ fn voting_locking_more_than_total_balance_is_moot() { assert_eq!(balances(&3), (28, 2)); assert_eq!( Elections::voter_info(&3).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 30, pot:0 } + VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 } ); }); } @@ -424,7 +391,7 @@ fn voting_setting_an_approval_vote_count_more_than_candidate_count_should_not_wo assert_eq!(Elections::candidates().len(), 1); assert_noop!( - Elections::set_approvals(Origin::signed(4),vec![true, true], 0, 0, 40), + Elections::set_approvals(Origin::signed(4), vec![true, true], 0, 0, 40), Error::::TooManyVotes, ); }); @@ -498,7 +465,10 @@ fn voting_invalid_retraction_index_should_not_work() { assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); assert_eq!(voter_ids(), vec![1, 2]); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); + assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); }); } @@ -508,7 +478,10 @@ fn voting_overflow_retraction_index_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); + assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); }); } @@ -518,7 +491,10 @@ fn voting_non_voter_retraction_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(2), 0), Error::::RetractNonVoter); + assert_noop!( + Elections::retract_voter(Origin::signed(2), 0), + Error::::RetractNonVoter + ); }); } @@ -543,9 +519,11 @@ fn retracting_inactive_voter_should_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), + 
assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), 2 )); @@ -580,9 +558,11 @@ fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { System::set_block_number(11); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), 2 )); @@ -612,11 +592,16 @@ fn retracting_inactive_voter_with_bad_reporter_index_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - 42, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::InvalidReporterIndex); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + 42, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::InvalidReporterIndex + ); }); } @@ -641,11 +626,16 @@ fn retracting_inactive_voter_with_bad_target_index_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2, 42, - 2 - ), Error::::InvalidTargetIndex); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + 42, + 2 + ), + Error::::InvalidTargetIndex + ); }); } @@ -657,10 +647,34 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 2)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false, false, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, true, false, false], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, true, false], 0, 0, 40)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false, false, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, true, false, false], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, true, false], + 0, + 0, + 40 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -675,16 +689,30 @@ fn retracting_active_voter_should_slash_reporter() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20 + Elections::get_offset(20, 1), 1)); - 
assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1), 1)); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 2, + 20 + Elections::get_offset(20, 1), + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1), + 1 + )); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::vote_index(), 2); assert_eq!(::InactiveGracePeriod::get(), 1); assert_eq!(::VotingPeriod::get(), 4); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 })); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 }) + ); - assert_ok!(Elections::reap_inactive_voter(Origin::signed(4), + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(4), (voter_ids().iter().position(|&i| i == 4).unwrap() as u32).into(), 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), @@ -718,11 +746,16 @@ fn retracting_inactive_voter_by_nonvoter_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); assert_ok!(Elections::end_block(System::block_number())); - assert_noop!(Elections::reap_inactive_voter(Origin::signed(4), - 0, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::NotVoter); + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(4), + 0, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::NotVoter + ); }); } @@ -933,7 +966,7 @@ fn election_seats_should_be_released() { assert_ok!(Elections::end_block(System::block_number())); if Elections::members().len() == 0 { free_block = current; - break; + break } } // 11 + 2 which is the next voting period. 
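// Editor's note: the chunking tests above pin down the approval encoding:
// approvals are packed LSB-first into `u32` words of `APPROVAL_FLAG_LEN` bits,
// so `[true, false, false, false, false, true, true, true]` round-trips through
// `0xE1`, and 100 `true`s become three `0xFFFFFFFF` words plus `0x0F`. Below is
// a minimal standalone sketch of that packing, assuming `APPROVAL_FLAG_LEN = 32`
// (which the `0xFFFFFFFF` words imply); `pack` is an illustrative helper, not
// the pallet's API.

const APPROVAL_FLAG_LEN: usize = 32;

/// Pack booleans LSB-first into 32-bit words, mirroring `bool_to_flag`.
fn pack(approvals: &[bool]) -> Vec<u32> {
    let words = (approvals.len() + APPROVAL_FLAG_LEN - 1) / APPROVAL_FLAG_LEN;
    let mut result = vec![0u32; words];
    for (i, &approved) in approvals.iter().enumerate() {
        if approved {
            // Approval `i` lands in word `i / 32`, bit `i % 32`.
            result[i / APPROVAL_FLAG_LEN] |= 1 << (i % APPROVAL_FLAG_LEN);
        }
    }
    result
}

/// Mirror of `bit_at`: is bit `n` of flag word `x` set?
fn bit_at(x: u32, n: usize) -> bool {
    n < APPROVAL_FLAG_LEN && x & (1 << n) != 0
}

fn main() {
    // Matches the `set_1` expectation in `chunking_bool_to_flag_should_work`.
    let flags = pack(&[true, false, false, false, false, true, true, true]);
    assert_eq!(flags, vec![0xE1]);
    assert!(bit_at(flags[0], 0) && bit_at(flags[0], 7) && !bit_at(flags[0], 1));
}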
@@ -1021,9 +1054,21 @@ fn election_presenting_loser_should_not_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1032,14 +1077,12 @@ fn election_presenting_loser_should_not_work() { assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 0), Error::::UnworthyCandidate); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 0), + Error::::UnworthyCandidate + ); }); } @@ -1054,9 +1097,21 @@ fn election_presenting_loser_first_should_not_matter() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1066,12 +1121,7 @@ fn election_presenting_loser_first_should_not_matter() { assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); }); } @@ -1098,7 +1148,10 @@ fn election_present_with_invalid_vote_index_should_not_work() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 1), Error::::InvalidVoteIndex); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 1), + Error::::InvalidVoteIndex + ); }); } @@ -1115,10 +1168,10 @@ fn election_present_when_presenter_is_poor_should_not_work() { let _ = Balances::make_free_balance_be(&1, 15); assert!(!Elections::presentation_active()); - // -3 + // -3 assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); assert_eq!(Balances::free_balance(1), 12); - // -2 -5 + // -2 -5 
assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 15)); assert_ok!(Elections::end_block(System::block_number())); @@ -1126,8 +1179,8 @@ fn election_present_when_presenter_is_poor_should_not_work() { assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 5); if p > 5 { - assert_noop!(Elections::present_winner( - Origin::signed(1), 1, 10, 0), + assert_noop!( + Elections::present_winner(Origin::signed(1), 1, 10, 0), Error::::InsufficientPresenterFunds, ); } else { @@ -1153,7 +1206,10 @@ fn election_invalid_present_tally_should_slash() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); - assert_err!(Elections::present_winner(Origin::signed(4), 2, 80, 0), Error::::IncorrectTotal); + assert_err!( + Elections::present_winner(Origin::signed(4), 2, 80, 0), + Error::::IncorrectTotal + ); assert_eq!(Balances::total_balance(&4), 38); }); @@ -1172,9 +1228,21 @@ fn election_runners_up_should_be_kept() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1183,21 +1251,11 @@ fn election_runners_up_should_be_kept() { assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); // leaderboard length is the empty seats plus the carry count (i.e. 
5 + 2), where those // to be carried are the lowest and stored in lowest indices - assert_eq!(Elections::leaderboard(), Some(vec![ - (0, 0), - (0, 0), - (0, 0), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (0, 0), (0, 0), (60, 1)])); assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); + assert_eq!(Elections::leaderboard(), Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1210,11 +1268,26 @@ fn election_runners_up_should_be_kept() { assert!(Elections::is_a_candidate(&3)); assert!(Elections::is_a_candidate(&4)); assert_eq!(Elections::vote_index(), 1); - assert_eq!(Elections::voter_info(2), Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 })); - assert_eq!(Elections::voter_info(3), Some(VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 })); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 })); - assert_eq!(Elections::voter_info(5), Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 })); - assert_eq!(Elections::voter_info(6), Some(VoterInfo { last_win: 1, last_active: 0, stake: 60, pot: 0 })); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(6), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 60, pot: 0 }) + ); assert_eq!(Elections::candidate_reg_info(3), Some((0, 2))); assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); }); @@ -1231,9 +1304,21 @@ fn election_second_tally_should_use_runners_up() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1244,13 +1329,29 @@ fn election_second_tally_should_use_runners_up() { assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(8); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![false, false, true, false], 1, 0, 60)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![false, false, true, false], + 1, + 0, + 60 + )); assert_ok!(Elections::set_desired_seats(Origin::root(), 3)); assert_ok!(Elections::end_block(System::block_number())); 
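// Editor's note: the `30 + Elections::get_offset(30, 1)` tallies in the hunks
// around here use the vote-decay offset whose expected values appear in
// `pot_get_offset_should_work` below (48_000_000_000 from a 50_000_000_000
// stake, and so on, implying `DecayRatio = 24`). The following is a
// self-contained sketch of one plausible reading of that accumulation — an
// illustrative free function over concrete integer types, not the pallet's
// generic code.

/// Accumulate the decayed weight of a stake over `t` missed vote indices:
/// each round the remembered stake shrinks by decay_ratio / (decay_ratio + 1).
fn get_offset(stake: u64, t: u32, decay_ratio: u64) -> u64 {
    let decay = decay_ratio + 1;
    let mut offset = stake;
    let mut r = 0u64;
    for _ in 0..t {
        offset = offset.saturating_mul(decay_ratio) / decay;
        r += offset;
    }
    r
}

fn main() {
    // Reproduces the `pot_get_offset_should_work` expectations with ratio 24.
    assert_eq!(get_offset(50_000_000_000, 0, 24), 0);
    assert_eq!(get_offset(50_000_000_000, 1, 24), 48_000_000_000);
    assert_eq!(get_offset(50_000_000_000, 2, 24), 48_000_000_000 + 46_080_000_000);
}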
System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1) + 60, 1)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40 + Elections::get_offset(40, 1), 1)); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1) + 60, + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 4, + 40 + Elections::get_offset(40, 1), + 1 + )); assert_ok!(Elections::end_block(System::block_number())); assert!(!Elections::presentation_active()); @@ -1262,13 +1363,25 @@ fn election_second_tally_should_use_runners_up() { assert!(!Elections::is_a_candidate(&5)); assert!(Elections::is_a_candidate(&4)); assert_eq!(Elections::vote_index(), 2); - assert_eq!(Elections::voter_info(2), Some( VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0})); - assert_eq!(Elections::voter_info(3), Some( VoterInfo { last_win: 2, last_active: 0, stake: 30, pot: 0})); - assert_eq!(Elections::voter_info(4), Some( VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0})); - assert_eq!(Elections::voter_info(5), Some( VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0})); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { last_win: 2, last_active: 0, stake: 30, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 }) + ); assert_eq!( Elections::voter_info(6), - Some(VoterInfo { last_win: 2, last_active: 1, stake: 60, pot: 0}) + Some(VoterInfo { last_win: 2, last_active: 1, stake: 60, pot: 0 }) ); assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); @@ -1289,9 +1402,13 @@ fn election_loser_candidates_bond_gets_slashed() { assert_eq!(balances(&2), (17, 3)); assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 0, 0, 50)); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, true, true, true], 0, 0, 10) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, true, true, true], + 0, + 0, + 10 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1302,7 +1419,6 @@ fn election_loser_candidates_bond_gets_slashed() { assert_eq!(Elections::present_winner(Origin::signed(2), 2, 10, 0), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(1), 1, 50, 0), Ok(())); - // winner + carry assert_eq!(Elections::leaderboard(), Some(vec![(10, 3), (10, 4), (50, 1)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1324,15 +1440,27 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 0, 500) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 0, 100) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 0, + 500 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, 
false, true], + 0, + 0, + 100 + )); assert_ok!(Elections::end_block(System::block_number())); @@ -1348,15 +1476,15 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 600, pot: 0}, + VoterInfo { last_win: 1, last_active: 0, stake: 600, pot: 0 }, ); assert_eq!( Elections::voter_info(5).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 500, pot: 0}, + VoterInfo { last_win: 1, last_active: 0, stake: 500, pot: 0 }, ); assert_eq!( Elections::voter_info(1).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}, + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 }, ); System::set_block_number(12); @@ -1365,80 +1493,144 @@ fn pot_accumulating_weight_and_decaying_should_work() { assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500) - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(14); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 1), 1), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 1), 1), + Ok(()) + ); assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96, 1), (500, 5), (600, 6)])); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 19), (5, 19)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 2, last_active: 1, stake: 600, pot:0 } + VoterInfo { last_win: 2, last_active: 1, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 2, last_active: 1, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 2, last_active: 1, stake: 500, pot:0 }); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot:0 }); System::set_block_number(20); assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 2, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 2, 1, 500)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 2, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 2, + 1, + 500 + )); 
assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(22); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 2), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 2), 2), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)])); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 2), 2), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)]) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 27), (5, 27)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 3, last_active: 2, stake: 600, pot: 0} + VoterInfo { last_win: 3, last_active: 2, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 3, last_active: 2, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 3, last_active: 2, stake: 500, pot: 0}); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); - System::set_block_number(28); assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 3, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 3, 1, 500)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 3, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 3, + 1, + 500 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(30); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 3), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 3), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 3), 3), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)])); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 3), 3), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)]) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(6, 35), (5, 35)]); assert_eq!( Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 4, last_active: 3, stake: 600, pot: 0} + VoterInfo { last_win: 4, last_active: 3, stake: 600, pot: 0 } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { last_win: 4, last_active: 3, stake: 500, pot: 0 } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0 } ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 4, last_active: 3, stake: 500, pot: 0}); - 
assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); }) } @@ -1453,9 +1645,27 @@ fn pot_winning_resets_accumulated_pot() { assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 0, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 0, 1, 400)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true, true], 0, 2, 300)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 0, + 1, + 400 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true, true], + 0, + 2, + 300 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(6); @@ -1474,16 +1684,34 @@ fn pot_winning_resets_accumulated_pot() { assert_ok!(Elections::retract_voter(Origin::signed(4), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 1, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 1, 1, 400)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 1, + 1, + 400 + )); assert_ok!(Elections::end_block(System::block_number())); System::set_block_number(14); assert!(Elections::presentation_active()); assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(3), 3, 300 + Elections::get_offset(300, 1), 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(2), 2, 300 + Elections::get_offset(300, 1), 1), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(3), 3, 300 + Elections::get_offset(300, 1), 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(2), 2, 300 + Elections::get_offset(300, 1), 1), + Ok(()) + ); assert_eq!(Elections::leaderboard(), Some(vec![(400, 4), (588, 2), (588, 3), (600, 6)])); assert_ok!(Elections::end_block(System::block_number())); @@ -1497,7 +1725,10 @@ fn pot_winning_resets_accumulated_pot() { // because one of 3's candidates (3) won in previous round // 4 on the other hand will get extra weight since it was unlucky. 
assert_eq!(Elections::present_winner(Origin::signed(3), 2, 300, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400 + Elections::get_offset(400, 1), 2), Ok(())); + assert_eq!( + Elections::present_winner(Origin::signed(4), 4, 400 + Elections::get_offset(400, 1), 2), + Ok(()) + ); assert_ok!(Elections::end_block(System::block_number())); assert_eq!(Elections::members(), vec![(4, 27), (2, 27)]); @@ -1519,15 +1750,27 @@ fn pot_resubmitting_approvals_stores_pot() { assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 1, 500), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 2, 100), - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 1, + 500 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 0, + 2, + 100 + ),); assert_ok!(Elections::end_block(System::block_number())); @@ -1547,18 +1790,31 @@ fn pot_resubmitting_approvals_stores_pot() { assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500), - ); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + ),); // give 1 some new high balance let _ = Balances::make_free_balance_be(&1, 997); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 1, 2, 1000), - ); - assert_eq!(Elections::voter_info(1).unwrap(), + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 1, + 2, + 1000 + ),); + assert_eq!( + Elections::voter_info(1).unwrap(), VoterInfo { stake: 1000, // 997 + 3 which is candidacy bond. pot: Elections::get_offset(100, 1), @@ -1599,7 +1855,10 @@ fn pot_get_offset_should_work() { assert_eq!(Elections::get_offset(50_000_000_000, 0), 0); assert_eq!(Elections::get_offset(50_000_000_000, 1), 48_000_000_000); assert_eq!(Elections::get_offset(50_000_000_000, 2), 48_000_000_000 + 46_080_000_000); - assert_eq!(Elections::get_offset(50_000_000_000, 3), 48_000_000_000 + 46_080_000_000 + 44_236_800_000); + assert_eq!( + Elections::get_offset(50_000_000_000, 3), + 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + ); assert_eq!( Elections::get_offset(50_000_000_000, 4), 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + 42_467_328_000 diff --git a/substrate/frame/example-offchain-worker/src/lib.rs b/substrate/frame/example-offchain-worker/src/lib.rs index b7a766ad847b20aa3869a792706c33005c015c92..01f3c355fa43da78054caa8730c1e8a0ed9e91d4 100644 --- a/substrate/frame/example-offchain-worker/src/lib.rs +++ b/substrate/frame/example-offchain-worker/src/lib.rs @@ -42,24 +42,28 @@ //! one unsigned transaction floating in the network. 
#![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; +use frame_support::traits::Get; use frame_system::{ self as system, offchain::{ - AppCrypto, CreateSignedTransaction, SendUnsignedTransaction, SendSignedTransaction, - SignedPayload, SigningTypes, Signer, SubmitTransaction, - } + AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, + SignedPayload, Signer, SigningTypes, SubmitTransaction, + }, }; -use frame_support::traits::Get; +use lite_json::json::JsonValue; use sp_core::crypto::KeyTypeId; use sp_runtime::{ - RuntimeDebug, - offchain::{http, Duration, storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}}, + offchain::{ + http, + storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + Duration, + }, traits::Zero, - transaction_validity::{InvalidTransaction, ValidTransaction, TransactionValidity}, + transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, + RuntimeDebug, }; -use codec::{Encode, Decode}; use sp_std::vec::Vec; -use lite_json::json::JsonValue; #[cfg(test)] mod tests; @@ -78,15 +82,17 @@ pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"btc!"); /// the types with this pallet-specific identifier. pub mod crypto { use super::KEY_TYPE; + use sp_core::sr25519::Signature as Sr25519Signature; use sp_runtime::{ app_crypto::{app_crypto, sr25519}, traits::Verify, }; - use sp_core::sr25519::Signature as Sr25519Signature; app_crypto!(sr25519, KEY_TYPE); pub struct TestAuthId; - impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> for TestAuthId { + impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> + for TestAuthId + { type RuntimeAppPublic = Public; type GenericSignature = sp_core::sr25519::Signature; type GenericPublic = sp_core::sr25519::Public; @@ -97,9 +103,9 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; /// This pallet's configuration trait #[pallet::config] @@ -179,8 +185,10 @@ pub mod pallet { let should_send = Self::choose_transaction_type(block_number); let res = match should_send { TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::UnsignedForAny => Self::fetch_price_and_send_unsigned_for_any_account(block_number), - TransactionType::UnsignedForAll => Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), + TransactionType::UnsignedForAny => + Self::fetch_price_and_send_unsigned_for_any_account(block_number), + TransactionType::UnsignedForAll => + Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), TransactionType::None => Ok(()), }; @@ -236,7 +244,7 @@ pub mod pallet { pub fn submit_price_unsigned( origin: OriginFor, _block_number: T::BlockNumber, - price: u32 + price: u32, ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; @@ -283,17 +291,15 @@ pub mod pallet { /// By default unsigned transactions are disallowed, but implementing the validator /// here we make sure that some particular calls (the ones produced by offchain worker) /// are being whitelisted and marked as valid. 
- fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { // Firstly let's check that we call the right function. - if let Call::submit_price_unsigned_with_signed_payload( - ref payload, ref signature - ) = call { - let signature_valid = SignedPayload::::verify::(payload, signature.clone()); + if let Call::submit_price_unsigned_with_signed_payload(ref payload, ref signature) = + call + { + let signature_valid = + SignedPayload::::verify::(payload, signature.clone()); if !signature_valid { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } Self::validate_transaction_parameters(&payload.block_number, &payload.price) } else if let Call::submit_price_unsigned(block_number, new_price) = call { @@ -370,11 +376,10 @@ impl Pallet { match last_send { // If we already have a value in storage and the block number is recent enough // we avoid sending another transaction at this time. - Ok(Some(block)) if block_number < block + T::GracePeriod::get() => { - Err(RECENTLY_SENT) - }, + Ok(Some(block)) if block_number < block + T::GracePeriod::get() => + Err(RECENTLY_SENT), // In every other case we attempt to acquire the lock and send a transaction. - _ => Ok(block_number) + _ => Ok(block_number), } }); @@ -396,10 +401,15 @@ impl Pallet { // the storage entry for that. (for instance store both block number and a flag // indicating the type of next transaction to send). let transaction_type = block_number % 3u32.into(); - if transaction_type == Zero::zero() { TransactionType::Signed } - else if transaction_type == T::BlockNumber::from(1u32) { TransactionType::UnsignedForAny } - else if transaction_type == T::BlockNumber::from(2u32) { TransactionType::UnsignedForAll } - else { TransactionType::Raw } + if transaction_type == Zero::zero() { + TransactionType::Signed + } else if transaction_type == T::BlockNumber::from(1u32) { + TransactionType::UnsignedForAny + } else if transaction_type == T::BlockNumber::from(2u32) { + TransactionType::UnsignedForAll + } else { + TransactionType::Raw + } }, // We are in the grace period, we should not send a transaction this time. Err(MutateStorageError::ValueFunctionFailed(RECENTLY_SENT)) => TransactionType::None, @@ -417,7 +427,7 @@ impl Pallet { let signer = Signer::::all_accounts(); if !signer.can_sign() { return Err( - "No local accounts available. Consider adding one via `author_insertKey` RPC." + "No local accounts available. Consider adding one via `author_insertKey` RPC.", )? } // Make an external HTTP request to fetch the current price. @@ -428,14 +438,12 @@ impl Pallet { // representing the call, we've just created. // Submit signed will return a vector of results for all accounts that were found in the // local keystore with expected `KEY_TYPE`. - let results = signer.send_signed_transaction( - |_account| { - // Received price is wrapped into a call to `submit_price` public function of this pallet. - // This means that the transaction, when executed, will simply call that function passing - // `price` as an argument. - Call::submit_price(price) - } - ); + let results = signer.send_signed_transaction(|_account| { + // Received price is wrapped into a call to `submit_price` public function of this pallet. + // This means that the transaction, when executed, will simply call that function passing + // `price` as an argument. 
+ Call::submit_price(price) + }); for (acc, res) in &results { match res { @@ -480,7 +488,9 @@ impl Pallet { } /// A helper function to fetch the price, sign payload and send an unsigned transaction - fn fetch_price_and_send_unsigned_for_any_account(block_number: T::BlockNumber) -> Result<(), &'static str> { + fn fetch_price_and_send_unsigned_for_any_account( + block_number: T::BlockNumber, + ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. let next_unsigned_at = >::get(); @@ -493,23 +503,23 @@ impl Pallet { let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; // -- Sign using any account - let (_, result) = Signer::::any_account().send_unsigned_transaction( - |account| PricePayload { - price, - block_number, - public: account.public.clone() - }, - |payload, signature| { - Call::submit_price_unsigned_with_signed_payload(payload, signature) - } - ).ok_or("No local accounts accounts available.")?; + let (_, result) = Signer::::any_account() + .send_unsigned_transaction( + |account| PricePayload { price, block_number, public: account.public.clone() }, + |payload, signature| { + Call::submit_price_unsigned_with_signed_payload(payload, signature) + }, + ) + .ok_or("No local accounts available.")?; result.map_err(|()| "Unable to submit transaction")?; Ok(()) } /// A helper function to fetch the price, sign payload and send an unsigned transaction - fn fetch_price_and_send_unsigned_for_all_accounts(block_number: T::BlockNumber) -> Result<(), &'static str> { + fn fetch_price_and_send_unsigned_for_all_accounts( + block_number: T::BlockNumber, + ) -> Result<(), &'static str> { // Make sure we don't fetch the price if unsigned transaction is going to be rejected // anyway. let next_unsigned_at = >::get(); @@ -524,18 +534,14 @@ impl Pallet { // -- Sign using all accounts let transaction_results = Signer::::all_accounts() .send_unsigned_transaction( - |account| PricePayload { - price, - block_number, - public: account.public.clone() - }, + |account| PricePayload { price, block_number, public: account.public.clone() }, |payload, signature| { Call::submit_price_unsigned_with_signed_payload(payload, signature) - } + }, ); for (_account_id, result) in transaction_results.into_iter() { if result.is_err() { - return Err("Unable to submit transaction"); + return Err("Unable to submit transaction") } } @@ -554,16 +560,12 @@ impl Pallet { // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but // since we are running in a custom WASM execution environment we can't simply // import the library here. - let request = http::Request::get( - "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD" - ); + let request = + http::Request::get("https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD"); // We set the deadline for sending of the request, note that awaiting response can // have a separate deadline. Next we send the request, before that it's also possible // to alter request headers or stream body content in case of non-GET requests. - let pending = request - .deadline(deadline) - .send() - .map_err(|_| http::Error::IoError)?; + let pending = request.deadline(deadline).send().map_err(|_| http::Error::IoError)?; // The request is already being processed by the host, we are free to do anything // else in the worker (we can send multiple concurrent requests too). @@ -571,12 +573,11 @@ impl Pallet { // so we can block current thread and wait for it to finish.
// Note that since the request is being driven by the host, we don't have to wait // for the request to have it complete, we will just not read the response. - let response = pending.try_wait(deadline) - .map_err(|_| http::Error::DeadlineReached)??; + let response = pending.try_wait(deadline).map_err(|_| http::Error::DeadlineReached)??; // Let's check the status code before we proceed to reading the response. if response.code != 200 { log::warn!("Unexpected status code: {}", response.code); - return Err(http::Error::Unknown); + return Err(http::Error::Unknown) } // Next we want to fully read the response body and collect it to a vector of bytes. @@ -595,7 +596,7 @@ impl Pallet { None => { log::warn!("Unable to extract price from the response: {:?}", body_str); Err(http::Error::Unknown) - } + }, }?; log::warn!("Got price: {} cents", price); @@ -610,8 +611,7 @@ impl Pallet { let val = lite_json::parse_json(price_str); let price = match val.ok()? { JsonValue::Object(obj) => { - let (_, v) = obj.into_iter() - .find(|(k, _)| k.iter().copied().eq("USD".chars()))?; + let (_, v) = obj.into_iter().find(|(k, _)| k.iter().copied().eq("USD".chars()))?; match v { JsonValue::Number(number) => number, _ => return None, @@ -661,12 +661,12 @@ impl Pallet { // Now let's check if the transaction has any chance to succeed. let next_unsigned_at = >::get(); if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // Let's make sure to reject transactions from the future. let current_block = >::block_number(); if ¤t_block < block_number { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } // We prioritize transactions that are more far away from current average. diff --git a/substrate/frame/example-offchain-worker/src/tests.rs b/substrate/frame/example-offchain-worker/src/tests.rs index 7d16e59490342b3618f1b223d332356d4e7a5a2f..706569e0e18d0764d67cf246de72437fbfac811d 100644 --- a/substrate/frame/example-offchain-worker/src/tests.rs +++ b/substrate/frame/example-offchain-worker/src/tests.rs @@ -15,28 +15,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
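// Editor's note: the `fetch_price`/`parse_price` hunks above lean on two
// `lite_json` details worth spelling out before the tests: object keys come
// back as `Vec<char>` (hence the char-wise "USD" comparison), and numbers
// arrive as integer/fraction pairs rather than floats. Below is a hedged
// sketch of the extraction; the cents conversion assumes a two-digit fraction
// and is a simplification, not necessarily the pallet's exact rounding.

use lite_json::json::JsonValue;

/// Pull the "USD" value out of e.g. `{"USD": 155.23}` and convert it to cents.
fn parse_usd_cents(body: &str) -> Option<u32> {
    let obj = match lite_json::parse_json(body).ok()? {
        JsonValue::Object(obj) => obj,
        _ => return None,
    };
    // Keys are `Vec<char>` in lite_json, so compare character by character.
    let (_, value) = obj.into_iter().find(|(k, _)| k.iter().copied().eq("USD".chars()))?;
    let number = match value {
        JsonValue::Number(n) => n,
        _ => return None,
    };
    // 155.23 parses as integer = 155, fraction = 23, fraction_length = 2.
    Some(number.integer as u32 * 100 +
        (number.fraction / 10u64.pow(number.fraction_length.saturating_sub(2))) as u32)
}

fn main() {
    assert_eq!(parse_usd_cents(r#"{"USD": 155.23}"#), Some(15523));
}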
-use crate::*; use crate as example_offchain_worker; -use std::sync::Arc; +use crate::*; use codec::Decode; use frame_support::{assert_ok, parameter_types}; use sp_core::{ - H256, - offchain::{OffchainWorkerExt, TransactionPoolExt, testing}, + offchain::{testing, OffchainWorkerExt, TransactionPoolExt}, sr25519::Signature, + H256, }; +use std::sync::Arc; -use sp_keystore::{ - {KeystoreExt, SyncCryptoStore}, - testing::KeyStore, -}; +use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStore}; use sp_runtime::{ - RuntimeAppPublic, testing::{Header, TestXt}, - traits::{ - BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicT, - IdentifyAccount, Verify, - }, + traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, + RuntimeAppPublic, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -93,14 +87,16 @@ impl frame_system::offchain::SigningTypes for Test { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Test where +impl frame_system::offchain::SendTransactionTypes for Test +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } -impl frame_system::offchain::CreateSignedTransaction for Test where +impl frame_system::offchain::CreateSignedTransaction for Test +where Call: From, { fn create_transaction>( @@ -190,7 +186,6 @@ fn knows_how_to_mock_several_http_calls() { }); } - t.execute_with(|| { let price1 = Example::fetch_price().unwrap(); let price2 = Example::fetch_price().unwrap(); @@ -200,12 +195,12 @@ fn knows_how_to_mock_several_http_calls() { assert_eq!(price2, 200); assert_eq!(price3, 300); }) - } #[test] fn should_submit_signed_transaction_on_chain() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -213,9 +208,9 @@ fn should_submit_signed_transaction_on_chain() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); - + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let mut t = sp_io::TestExternalities::default(); t.register_extension(OffchainWorkerExt::new(offchain)); @@ -238,7 +233,8 @@ fn should_submit_signed_transaction_on_chain() { #[test] fn should_submit_unsigned_transaction_on_chain_for_any_account() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -247,8 +243,9 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) @@ -276,13 +273,18 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let 
Call::Example(crate::Call::submit_price_unsigned_with_signed_payload(body, signature)) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload( + body, + signature, + )) = tx.call + { assert_eq!(body, price_payload); - let signature_valid = ::Public, - ::BlockNumber - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = + ::Public, + ::BlockNumber, + > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); } @@ -291,7 +293,8 @@ fn should_submit_unsigned_transaction_on_chain_for_any_account() { #[test] fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; let (offchain, offchain_state) = testing::TestOffchainExt::new(); let (pool, pool_state) = testing::TestTransactionPoolExt::new(); @@ -300,8 +303,9 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { SyncCryptoStore::sr25519_generate_new( &keystore, crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); let public_key = SyncCryptoStore::sr25519_public_keys(&keystore, crate::crypto::Public::ID) .get(0) @@ -329,13 +333,18 @@ fn should_submit_unsigned_transaction_on_chain_for_all_accounts() { let tx = pool_state.write().transactions.pop().unwrap(); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload(body, signature)) = tx.call { + if let Call::Example(crate::Call::submit_price_unsigned_with_signed_payload( + body, + signature, + )) = tx.call + { assert_eq!(body, price_payload); - let signature_valid = ::Public, - ::BlockNumber - > as SignedPayload>::verify::(&price_payload, signature); + let signature_valid = + ::Public, + ::BlockNumber, + > as SignedPayload>::verify::(&price_payload, signature); assert!(signature_valid); } diff --git a/substrate/frame/example-parallel/src/lib.rs b/substrate/frame/example-parallel/src/lib.rs index 24668c5b5ab0c23d87057fe44867b5c4282c0cc2..c41cd2401dd257340bb91114417c5d01be9af680 100644 --- a/substrate/frame/example-parallel/src/lib.rs +++ b/substrate/frame/example-parallel/src/lib.rs @@ -24,7 +24,7 @@ use sp_runtime::RuntimeDebug; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; #[cfg(test)] @@ -34,9 +34,9 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { @@ -65,9 +65,10 @@ pub mod pallet { /// The example utilizes parallel execution by checking half of the /// signatures in spawned task. 
 		#[pallet::weight(0)]
-		pub fn enlist_participants(origin: OriginFor<T>, participants: Vec<EnlistedParticipant>)
-			-> DispatchResultWithPostInfo
-		{
+		pub fn enlist_participants(
+			origin: OriginFor<T>,
+			participants: Vec<EnlistedParticipant>,
+		) -> DispatchResultWithPostInfo {
 			let _ = ensure_signed(origin)?;
 
 			if validate_participants_parallel(&<CurrentEventId<T>>::get(), &participants[..]) {
@@ -103,21 +104,20 @@ pub struct EnlistedParticipant {
 impl EnlistedParticipant {
 	fn verify(&self, event_id: &[u8]) -> bool {
 		use sp_core::Public;
-		use std::convert::TryFrom;
 		use sp_runtime::traits::Verify;
+		use std::convert::TryFrom;
 
 		match sp_core::sr25519::Signature::try_from(&self.signature[..]) {
 			Ok(signature) => {
 				let public = sp_core::sr25519::Public::from_slice(self.account.as_ref());
 				signature.verify(event_id, &public)
-			}
-			_ => false
+			},
+			_ => false,
 		}
 	}
 }
 
 fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParticipant]) -> bool {
-
 	fn spawn_verify(data: Vec<u8>) -> Vec<u8> {
 		let stream = &mut &data[..];
 		let event_id = Vec::<u8>::decode(stream).expect("Failed to decode");
@@ -138,10 +138,10 @@ fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParti
 	let handle = sp_tasks::spawn(spawn_verify, async_payload);
 	let mut result = true;
 
-	for participant in &participants[participants.len()/2+1..] {
+	for participant in &participants[participants.len() / 2 + 1..] {
 		if !participant.verify(event_id) {
 			result = false;
-			break;
+			break
 		}
 	}
diff --git a/substrate/frame/example-parallel/src/tests.rs b/substrate/frame/example-parallel/src/tests.rs
index 395290c0bf6e784f127c35bffea10ce56aadbed4..f67c5ae51b50451ca0b374dedad6c7ec22077239 100644
--- a/substrate/frame/example-parallel/src/tests.rs
+++ b/substrate/frame/example-parallel/src/tests.rs
@@ -20,8 +20,9 @@
 use crate::{self as pallet_example_parallel, *};
 use frame_support::parameter_types;
 use sp_core::H256;
 use sp_runtime::{
-	Perbill, testing::Header,
+	testing::Header,
 	traits::{BlakeTwo256, IdentityLookup},
+	Perbill,
 };
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
@@ -108,7 +109,6 @@ fn it_can_enlist() {
 
 		assert_eq!(Example::participants().len(), 2);
 	});
-
 }
 
 #[test]
@@ -146,5 +146,4 @@ fn one_wrong_will_not_enlist_anyone() {
 
 		assert_eq!(Example::participants().len(), 0);
 	});
-
 }
diff --git a/substrate/frame/example/src/benchmarking.rs b/substrate/frame/example/src/benchmarking.rs
index 64602ca41cee95526a6ab59e1c6437f8dfdb669f..cdf6c152a4880ba15a6ad63587dd4607494f3523 100644
--- a/substrate/frame/example/src/benchmarking.rs
+++ b/substrate/frame/example/src/benchmarking.rs
@@ -20,7 +20,7 @@
 #![cfg(feature = "runtime-benchmarks")]
 
 use crate::*;
-use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite};
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
 use frame_system::RawOrigin;
 
 // To actually run this benchmark on pallet-example, we need to put this pallet into the
@@ -33,7 +33,7 @@ use frame_system::RawOrigin;
 // Details on using the benchmarks macro can be seen at:
 //   https://substrate.dev/rustdocs/v3.0.0/frame_benchmarking/macro.benchmarks.html
 
-benchmarks!{
+benchmarks! {
 	// This will measure the execution time of `set_dummy` for b in [1..1000] range.
 	set_dummy_benchmark {
 		// This is the benchmark setup phase
diff --git a/substrate/frame/example/src/lib.rs b/substrate/frame/example/src/lib.rs
index f5014b75640ba06ba0102af0f73c5b4b51c3b094..48b356df792ea9c83db41f5dfe6ed776dd0abfde 100644
--- a/substrate/frame/example/src/lib.rs
+++ b/substrate/frame/example/src/lib.rs
@@ -255,25 +255,21 @@
 // Ensure we're `no_std` when compiling for Wasm.
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::{
-	prelude::*,
-	marker::PhantomData
-};
+use codec::{Decode, Encode};
 use frame_support::{
-	dispatch::DispatchResult, traits::IsSubType,
-	weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays},
+	dispatch::DispatchResult,
+	traits::IsSubType,
+	weights::{ClassifyDispatch, DispatchClass, Pays, PaysFee, WeighData, Weight},
 };
-use frame_system::{ensure_signed};
-use codec::{Encode, Decode};
+use frame_system::ensure_signed;
+use log::info;
 use sp_runtime::{
-	traits::{
-		SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, Saturating
-	},
+	traits::{Bounded, DispatchInfoOf, SaturatedConversion, Saturating, SignedExtension},
 	transaction_validity::{
-		ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity,
+		InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction,
 	},
 };
-use log::info;
+use sp_std::{marker::PhantomData, prelude::*};
 
 // Re-export pallet items so that they can be accessed from the crate namespace.
 pub use pallet::*;
@@ -312,8 +308,7 @@ const MILLICENTS: u32 = 1_000_000_000;
 // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file.
 struct WeightForSetDummy<T: pallet_balances::Config>(BalanceOf<T>);
 
-impl<T: pallet_balances::Config> WeighData<(&BalanceOf<T>,)> for WeightForSetDummy<T>
-{
+impl<T: pallet_balances::Config> WeighData<(&BalanceOf<T>,)> for WeightForSetDummy<T> {
 	fn weigh_data(&self, target: (&BalanceOf<T>,)) -> Weight {
 		let multiplier = self.0;
 		// *target.0 is the amount passed into the extrinsic
@@ -343,9 +338,9 @@ impl<T: pallet_balances::Config> PaysFee<(&BalanceOf<T>,)> for WeightForSetDummy
 
 #[frame_support::pallet]
 pub mod pallet {
 	// Import various types used to declare pallet in scope.
+	use super::*;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
-	use super::*;
 
 	/// Our pallet's configuration trait. All our types and constants go in here. If the
 	/// pallet is dependent on specific other pallets, then their configuration traits
@@ -397,7 +392,7 @@ pub mod pallet {
 			// but we could dispatch extrinsic (transaction/unsigned/inherent) using
 			// sp_io::submit_extrinsic.
 			// To see example on offchain worker, please refer to example-offchain-worker pallet
-			// accompanied in this repository. 
+			// accompanied in this repository.
 		}
 	}
@@ -488,10 +483,7 @@ pub mod pallet {
 		#[pallet::weight(
 			<T as pallet::Config>::WeightInfo::accumulate_dummy((*increase_by).saturated_into())
 		)]
-		pub fn accumulate_dummy(
-			origin: OriginFor<T>,
-			increase_by: T::Balance
-		) -> DispatchResult {
+		pub fn accumulate_dummy(origin: OriginFor<T>, increase_by: T::Balance) -> DispatchResult {
 			// This is a public call, so we ensure that the origin is some signed account.
 			let _sender = ensure_signed(origin)?;
@@ -610,11 +602,7 @@ pub mod pallet {
 	#[cfg(feature = "std")]
 	impl<T: Config> Default for GenesisConfig<T> {
 		fn default() -> Self {
-			Self {
-				dummy: Default::default(),
-				bar: Default::default(),
-				foo: Default::default(),
-			}
+			Self { dummy: Default::default(), bar: Default::default(), foo: Default::default() }
 		}
 	}
@@ -709,7 +697,9 @@ where
 	type AdditionalSigned = ();
 	type Pre = ();
 
-	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) }
+	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> {
+		Ok(())
+	}
 
 	fn validate(
 		&self,
@@ -731,7 +721,7 @@ where
 				let mut valid_tx = ValidTransaction::default();
 				valid_tx.priority = Bounded::max_value();
 				Ok(valid_tx)
-			}
+			},
 			_ => Ok(Default::default()),
 		}
 	}
diff --git a/substrate/frame/example/src/tests.rs b/substrate/frame/example/src/tests.rs
index 68a9237921805e3bac5b6bc439d72bc87b97840e..18089888dba1a7bd6280d250ebb5fdd9b92cf7d4 100644
--- a/substrate/frame/example/src/tests.rs
+++ b/substrate/frame/example/src/tests.rs
@@ -20,14 +20,16 @@
 use crate::*;
 use frame_support::{
 	assert_ok, parameter_types,
-	weights::{DispatchInfo, GetDispatchInfo}, traits::OnInitialize
+	traits::OnInitialize,
+	weights::{DispatchInfo, GetDispatchInfo},
 };
 use sp_core::H256;
 // The testing primitives are very useful for avoiding having to work with signatures
 // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
 use sp_runtime::{
-	testing::Header, BuildStorage,
+	testing::Header,
 	traits::{BlakeTwo256, IdentityLookup},
+	BuildStorage,
 };
 // Reexport crate as its pallet name for construct_runtime.
 use crate as pallet_example;
@@ -115,7 +117,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities {
 			bar: vec![(1, 2), (2, 3)],
 			foo: 24,
 		},
-	}.build_storage().unwrap();
+	}
+	.build_storage()
+	.unwrap();
 	t.into()
 }
 
@@ -163,7 +167,8 @@ fn signed_ext_watch_dummy_works() {
 		let info = DispatchInfo::default();
 
 		assert_eq!(
-			WatchDummy::<Test>(PhantomData).validate(&1, &call, &info, 150)
+			WatchDummy::<Test>(PhantomData)
+				.validate(&1, &call, &info, 150)
 				.unwrap()
 				.priority,
 			u64::MAX,
@@ -183,7 +188,6 @@ fn weights_work() {
 	// aka. `let info = <Call<Test> as GetDispatchInfo>::get_dispatch_info(&default_call);`
 	assert!(info1.weight > 0);
 
-
 	// `set_dummy` is simpler than `accumulate_dummy`, and the weight
 	//   should be less.
 	let custom_call = <Call<Test>>::set_dummy(20);
diff --git a/substrate/frame/example/src/weights.rs b/substrate/frame/example/src/weights.rs
index db6491335c76fa90abd1970c25d87c1dbdb76c81..efcfdc6729b53e511a55f8dd200de1eb58fe470a 100644
--- a/substrate/frame/example/src/weights.rs
+++ b/substrate/frame/example/src/weights.rs
@@ -45,6 +45,7 @@
 // ./.maintain/frame-weight-template.hbs
 
+#![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
diff --git a/substrate/frame/executive/src/lib.rs b/substrate/frame/executive/src/lib.rs
index a11a5172dc95e49b8cf98aefce1c81ca59a65b1f..65512998252ac451a7c84674b668737ee6fe166e 100644
--- a/substrate/frame/executive/src/lib.rs
+++ b/substrate/frame/executive/src/lib.rs
@@ -116,25 +116,26 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use sp_std::{prelude::*, marker::PhantomData};
+use codec::{Codec, Encode};
 use frame_support::{
-	weights::{GetDispatchInfo, DispatchInfo, DispatchClass},
+	dispatch::PostDispatchInfo,
 	traits::{
-		OnInitialize, OnIdle, OnFinalize, OnRuntimeUpgrade, OffchainWorker, ExecuteBlock,
-		EnsureInherentsAreFirst,
+		EnsureInherentsAreFirst, ExecuteBlock, OffchainWorker, OnFinalize, OnIdle, OnInitialize,
+		OnRuntimeUpgrade,
 	},
-	dispatch::PostDispatchInfo,
+	weights::{DispatchClass, DispatchInfo, GetDispatchInfo},
 };
+use frame_system::DigestOf;
 use sp_runtime::{
-	generic::Digest, ApplyExtrinsicResult,
+	generic::Digest,
 	traits::{
-		self, Header, Zero, One, Checkable, Applyable, CheckEqual, ValidateUnsigned, NumberFor,
-		Dispatchable, Saturating,
+		self, Applyable, CheckEqual, Checkable, Dispatchable, Header, NumberFor, One, Saturating,
+		ValidateUnsigned, Zero,
 	},
-	transaction_validity::{TransactionValidity, TransactionSource},
+	transaction_validity::{TransactionSource, TransactionValidity},
+	ApplyExtrinsicResult,
 };
-use codec::{Codec, Encode};
-use frame_system::DigestOf;
+use sp_std::{marker::PhantomData, prelude::*};
 
 pub type CheckedOf<E, C> = <E as Checkable<C>>::Checked;
 pub type CallOf<E, C> = <CheckedOf<E, C> as Applyable>::Call;
@@ -152,31 +153,29 @@ pub type OriginOf<E, C> = <CallOf<E, C> as Dispatchable>::Origin;
 /// already called by `AllPallets`. It will be called before all modules will
 /// be called.
 pub struct Executive<System, Block, Context, UnsignedValidator, AllPallets, OnRuntimeUpgrade = ()>(
-	PhantomData<(System, Block, Context, UnsignedValidator, AllPallets, OnRuntimeUpgrade)>
+	PhantomData<(System, Block, Context, UnsignedValidator, AllPallets, OnRuntimeUpgrade)>,
 );
 
 impl<
-	System: frame_system::Config + EnsureInherentsAreFirst<Block>,
-	Block: traits::Block<Header = System::Header, Hash = System::Hash>,
-	Context: Default,
-	UnsignedValidator,
-	AllPallets:
-		OnRuntimeUpgrade +
-		OnInitialize<System::BlockNumber> +
-		OnIdle<System::BlockNumber> +
-		OnFinalize<System::BlockNumber> +
-		OffchainWorker<System::BlockNumber>,
-	COnRuntimeUpgrade: OnRuntimeUpgrade,
-> ExecuteBlock<Block> for
-	Executive<System, Block, Context, UnsignedValidator, AllPallets, COnRuntimeUpgrade>
+		System: frame_system::Config + EnsureInherentsAreFirst<Block>,
+		Block: traits::Block<Header = System::Header, Hash = System::Hash>,
+		Context: Default,
+		UnsignedValidator,
+		AllPallets: OnRuntimeUpgrade
+			+ OnInitialize<System::BlockNumber>
+			+ OnIdle<System::BlockNumber>
+			+ OnFinalize<System::BlockNumber>
+			+ OffchainWorker<System::BlockNumber>,
+		COnRuntimeUpgrade: OnRuntimeUpgrade,
+	> ExecuteBlock<Block>
+	for Executive<System, Block, Context, UnsignedValidator, AllPallets, COnRuntimeUpgrade>
 where
 	Block::Extrinsic: Checkable<Context> + Codec,
-	CheckedOf<Block::Extrinsic, Context>:
-		Applyable +
-		GetDispatchInfo,
-	CallOf<Block::Extrinsic, Context>: Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
+	CheckedOf<Block::Extrinsic, Context>: Applyable + GetDispatchInfo,
+	CallOf<Block::Extrinsic, Context>:
+		Dispatchable<Info = DispatchInfo, PostInfo = PostDispatchInfo>,
 	OriginOf<Block::Extrinsic, Context>: From<Option<System::AccountId>>,
-	UnsignedValidator: ValidateUnsigned<Call = CallOf<Block::Extrinsic, Context>>,
+	UnsignedValidator: ValidateUnsigned<Call = CallOf<Block::Extrinsic, Context>>,
 {
 	fn execute_block(block: Block) {
 		Executive::<
@@ -249,20 +248,16 @@ where
 		sp_io::init_tracing();
 		sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block");
 		let digests = Self::extract_pre_digest(&header);
-		Self::initialize_block_impl(
-			header.number(),
-			header.parent_hash(),
-			&digests
-		);
+		Self::initialize_block_impl(header.number(), header.parent_hash(), &digests);
 	}
 
 	fn extract_pre_digest(header: &System::Header) -> DigestOf<System> {
 		let mut digest = <DigestOf<System>>::default();
-		header.digest().logs()
-			.iter()
-			.for_each(|d| if d.as_pre_runtime().is_some() {
+		header.digest().logs().iter().for_each(|d| {
+			if d.as_pre_runtime().is_some() {
 				digest.push(d.clone())
-			});
+			}
+		});
 		digest
 	}
@@ -281,16 +276,19 @@ where
 			digest,
 			frame_system::InitKind::Full,
 		);
+		weight = weight.saturating_add(<AllPallets as OnInitialize<
+			System::BlockNumber,
+		>>::on_initialize(*block_number));
 		weight = weight.saturating_add(
-			<AllPallets as OnInitialize<System::BlockNumber>>::on_initialize(*block_number)
+			<frame_system::Pallet<System>>::on_initialize(*block_number),
 		);
 		weight = weight.saturating_add(
-			<frame_system::Pallet<System>>::on_initialize(*block_number)
+			<System::BlockWeights as frame_support::traits::Get<_>>::get().base_block,
 		);
-		weight = weight.saturating_add(
-			<System::BlockWeights as frame_support::traits::Get<_>>::get().base_block
+		<frame_system::Pallet<System>>::register_extra_weight_unchecked(
+			weight,
+			DispatchClass::Mandatory,
 		);
-		<frame_system::Pallet<System>>::register_extra_weight_unchecked(weight, DispatchClass::Mandatory);
 
 		frame_system::Pallet::<System>::note_finished_initialize();
 	}
@@ -317,8 +315,9 @@ where
 		// Check that `parent_hash` is correct.
 		let n = header.number().clone();
 		assert!(
-			n > System::BlockNumber::zero()
-			&& <frame_system::Pallet<System>>::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(),
+			n > System::BlockNumber::zero() &&
+				<frame_system::Pallet<System>>::block_hash(n - System::BlockNumber::one()) ==
+					*header.parent_hash(),
 			"Parent hash should be valid.",
 		);
@@ -358,9 +357,11 @@ where
 		extrinsics: Vec<Block::Extrinsic>,
 		block_number: NumberFor<Block>,
 	) {
-		extrinsics.into_iter().for_each(|e| if let Err(e) = Self::apply_extrinsic(e) {
-			let err: &'static str = e.into();
-			panic!("{}", err)
+		extrinsics.into_iter().for_each(|e| {
+			if let Err(e) = Self::apply_extrinsic(e) {
+				let err: &'static str = e.into();
+				panic!("{}", err)
+			}
 		});
 
 		// post-extrinsics book-keeping
@@ -373,7 +374,7 @@ where
 	/// except state-root.
 	pub fn finalize_block() -> System::Header {
 		sp_io::init_tracing();
-		sp_tracing::enter_span!( sp_tracing::Level::TRACE, "finalize_block" );
+		sp_tracing::enter_span!(sp_tracing::Level::TRACE, "finalize_block");
 		<frame_system::Pallet<System>>::note_finished_extrinsics();
 		let block_number = <frame_system::Pallet<System>>::block_number();
@@ -383,26 +384,31 @@ where
 	}
 
 	fn idle_and_finalize_hook(block_number: NumberFor<Block>) {
-		let weight = <frame_system::Pallet<System>>::block_weight();
-		let max_weight = <System::BlockWeights as frame_support::traits::Get<_>>::get().max_block;
+		let weight = <frame_system::Pallet<System>>::block_weight();
+		let max_weight = <System::BlockWeights as frame_support::traits::Get<_>>::get().max_block;
 		let mut remaining_weight = max_weight.saturating_sub(weight.total());
 
 		if remaining_weight > 0 {
 			let mut used_weight = <AllPallets as OnIdle<System::BlockNumber>>::on_idle(
 				block_number,
-				remaining_weight
+				remaining_weight,
 			);
 			remaining_weight = remaining_weight.saturating_sub(used_weight);
 			used_weight = <frame_system::Pallet<System> as OnIdle<System::BlockNumber>>::on_idle(
 				block_number,
-				remaining_weight
+				remaining_weight,
 			)
 			.saturating_add(used_weight);
-			<frame_system::Pallet<System>>::register_extra_weight_unchecked(used_weight, DispatchClass::Mandatory);
+			<frame_system::Pallet<System>>::register_extra_weight_unchecked(
+				used_weight,
+				DispatchClass::Mandatory,
+			);
 		}
 
-		<AllPallets as OnFinalize<System::BlockNumber>>::on_finalize(block_number);
+		<AllPallets as OnFinalize<System::BlockNumber>>::on_finalize(
+			block_number,
+		);
 		<frame_system::Pallet<System>>::on_finalize(block_number);
 	}
@@ -423,10 +429,8 @@ where
 		encoded_len: usize,
 		to_note: Vec<u8>,
 	) -> ApplyExtrinsicResult {
-		sp_tracing::enter_span!(
-			sp_tracing::info_span!("apply_extrinsic",
-				ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode()))
-		);
+		sp_tracing::enter_span!(sp_tracing::info_span!("apply_extrinsic",
+			ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode())));
 		// Verify that the signature is good.
 		let xt = uxt.check(&Default::default())?;
@@ -493,17 +497,17 @@ where
 			frame_system::InitKind::Inspection,
 		);
 
-		enter_span!{ sp_tracing::Level::TRACE, "validate_transaction" };
+		enter_span! { sp_tracing::Level::TRACE, "validate_transaction" };
 
-		let encoded_len = within_span!{ sp_tracing::Level::TRACE, "using_encoded";
+		let encoded_len = within_span! { sp_tracing::Level::TRACE, "using_encoded";
 			uxt.using_encoded(|d| d.len())
 		};
 
-		let xt = within_span!{ sp_tracing::Level::TRACE, "check";
+		let xt = within_span! { sp_tracing::Level::TRACE, "check";
 			uxt.check(&Default::default())
 		}?;
 
-		let dispatch_info = within_span!{ sp_tracing::Level::TRACE, "dispatch_info";
+		let dispatch_info = within_span! { sp_tracing::Level::TRACE, "dispatch_info";
 			xt.get_dispatch_info()
 		};
@@ -537,35 +541,34 @@ where
 	}
 }
 
-
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use sp_core::H256;
-	use sp_runtime::{
-		generic::{Era, DigestItem}, DispatchError, testing::{Digest, Header, Block},
-		traits::{Header as HeaderT, BlakeTwo256, IdentityLookup, Block as BlockT},
-		transaction_validity::{
-			InvalidTransaction, ValidTransaction, TransactionValidityError, UnknownTransaction
-		},
-	};
 	use frame_support::{
 		assert_err, parameter_types,
-		weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial},
 		traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons},
+		weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial},
 	};
-	use frame_system::{
-		Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo,
-	};
-	use pallet_transaction_payment::CurrencyAdapter;
-	use pallet_balances::Call as BalancesCall;
+	use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo};
 	use hex_literal::hex;
+	use pallet_balances::Call as BalancesCall;
+	use pallet_transaction_payment::CurrencyAdapter;
+	use sp_core::H256;
+	use sp_runtime::{
+		generic::{DigestItem, Era},
+		testing::{Block, Digest, Header},
+		traits::{BlakeTwo256, Block as BlockT, Header as HeaderT, IdentityLookup},
+		transaction_validity::{
+			InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction,
+		},
+		DispatchError,
+	};
 
 	const TEST_KEY: &[u8] = &*b":test:key:";
 
 	mod custom {
-		use frame_support::weights::{Weight, DispatchClass};
+		use frame_support::weights::{DispatchClass, Weight};
 		use sp_runtime::transaction_validity::{
-			UnknownTransaction, TransactionSource, TransactionValidity, TransactionValidityError,
+			TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction,
 		};
 
 		pub trait Config: frame_system::Config {}
@@ -658,13 +661,10 @@ mod tests {
 				Call::allowed_unsigned(..) => Ok(Default::default()),
 				_ => UnknownTransaction::NoUnsignedValidator.into(),
 			}
-
 		}
 
 		// Inherent call is accepted for being dispatched
-		fn pre_dispatch(
-			call: &Self::Call,
-		) -> Result<(), TransactionValidityError> {
+		fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
 			match call {
 				Call::allowed_unsigned(..) => Ok(()),
 				Call::inherent_call(..) => Ok(()),
@@ -793,7 +793,7 @@ mod tests {
 		ChainContext<Runtime>,
 		Runtime,
 		AllPallets,
-		CustomOnRuntimeUpgrade
+		CustomOnRuntimeUpgrade,
 	>;
 
 	fn extra(nonce: u64, fee: Balance) -> SignedExtra {
 		(
 			frame_system::CheckEra::from(Era::Immortal),
 			frame_system::CheckNonce::from(nonce),
 			frame_system::CheckWeight::new(),
-			pallet_transaction_payment::ChargeTransactionPayment::from(fee)
+			pallet_transaction_payment::ChargeTransactionPayment::from(fee),
 		)
 	}
 
@@ -812,14 +812,16 @@ mod tests {
 	#[test]
 	fn balance_transfer_dispatch_works() {
 		let mut t = frame_system::GenesisConfig::default().build_storage::<Runtime>().unwrap();
-		pallet_balances::GenesisConfig::<Runtime> {
-			balances: vec![(1, 211)],
-		}.assimilate_storage(&mut t).unwrap();
+		pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 211)] }
+			.assimilate_storage(&mut t)
+			.unwrap();
 		let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0));
 		let weight = xt.get_dispatch_info().weight +
-			<Runtime as frame_system::Config>::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic;
-		let fee: Balance
-			= <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&weight);
+			<Runtime as frame_system::Config>::BlockWeights::get()
+				.get(DispatchClass::Normal)
+				.base_extrinsic;
+		let fee: Balance =
+			<Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&weight);
 		let mut t = sp_io::TestExternalities::new(t);
 		t.execute_with(|| {
 			Executive::initialize_block(&Header::new(
@@ -838,9 +840,9 @@ mod tests {
 
 	fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities {
 		let mut t = frame_system::GenesisConfig::default().build_storage::<Runtime>().unwrap();
-		pallet_balances::GenesisConfig::<Runtime> {
-			balances: vec![(1, 111 * balance_factor)],
-		}.assimilate_storage(&mut t).unwrap();
+		pallet_balances::GenesisConfig::<Runtime> { balances: vec![(1, 111 * balance_factor)] }
+			.assimilate_storage(&mut t)
+			.unwrap();
 		t.into()
 	}
 
@@ -851,9 +853,15 @@ mod tests {
 			header: Header {
 				parent_hash: [69u8; 32].into(),
 				number: 1,
-				state_root: hex!("1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5").into(),
-				extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(),
-				digest: Digest { logs: vec![], },
+				state_root: hex!(
+					"1039e1a4bd0cf5deefe65f313577e70169c41c7773d6acf31ca8d671397559f5"
+				)
+				.into(),
+				extrinsics_root: hex!(
+					"03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
+				)
+				.into(),
+				digest: Digest { logs: vec![] },
 			},
 			extrinsics: vec![],
 		});
@@ -869,8 +877,11 @@ mod tests {
 				parent_hash: [69u8; 32].into(),
 				number: 1,
 				state_root: [0u8; 32].into(),
-				extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(),
-				digest: Digest { logs: vec![], },
+				extrinsics_root: hex!(
+					"03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
+				)
+				.into(),
+				digest: Digest { logs: vec![] },
 			},
 			extrinsics: vec![],
 		});
@@ -885,9 +896,12 @@ mod tests {
 			header: Header {
 				parent_hash: [69u8; 32].into(),
 				number: 1,
-				state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(),
+				state_root: hex!(
+					"49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48"
+				)
+				.into(),
 				extrinsics_root: [0u8; 32].into(),
-				digest: Digest { logs: vec![], },
+				digest: Digest { logs: vec![] },
 			},
 			extrinsics: vec![],
 		});
@@ -907,7 +921,8 @@ mod tests {
 				[69u8; 32].into(),
 				Digest::default(),
 			));
-			assert_err!(Executive::apply_extrinsic(xt),
+			assert_err!(
+				Executive::apply_extrinsic(xt),
 				TransactionValidityError::Invalid(InvalidTransaction::Future)
 			);
 			assert_eq!(<frame_system::Pallet<Runtime>>::extrinsic_index(), Some(0));
@@ -924,8 +939,7 @@ mod tests {
 		// on_initialize weight + base block execution weight
 		let block_weights = <Runtime as frame_system::Config>::BlockWeights::get();
 		let base_block_weight = 175 + block_weights.base_block;
-		let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap()
-			- base_block_weight;
+		let limit = block_weights.get(DispatchClass::Normal).max_total.unwrap() - base_block_weight;
 		let num_to_exhaust_block = limit / (encoded_len + 5);
 		t.execute_with(|| {
 			Executive::initialize_block(&Header::new(
@@ -940,7 +954,8 @@ mod tests {
 
 			for nonce in 0..=num_to_exhaust_block {
 				let xt = TestXt::new(
-					Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, nonce.into(), 0),
+					Call::Balances(BalancesCall::transfer(33, 0)),
+					sign_extra(1, nonce.into(), 0),
 				);
 				let res = Executive::apply_extrinsic(xt);
 				if nonce != num_to_exhaust_block {
@@ -950,7 +965,10 @@ mod tests {
 						//--------------------- on_initialize + block_execution + extrinsic_base weight
 						(encoded_len + 5) * (nonce + 1) + base_block_weight,
 					);
-					assert_eq!(<frame_system::Pallet<Runtime>>::extrinsic_index(), Some(nonce as u32 + 1));
+					assert_eq!(
+						<frame_system::Pallet<Runtime>>::extrinsic_index(),
+						Some(nonce as u32 + 1)
+					);
 				} else {
 					assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into()));
 				}
@@ -967,7 +985,8 @@ mod tests {
 		let mut t = new_test_ext(1);
 		t.execute_with(|| {
 			// Block execution weight + on_initialize weight from custom module
-			let base_block_weight = 175 + <Runtime as frame_system::Config>::BlockWeights::get().base_block;
+			let base_block_weight =
+				175 + <Runtime as frame_system::Config>::BlockWeights::get().base_block;
 
 			Executive::initialize_block(&Header::new(
 				1,
@@ -985,8 +1004,10 @@ mod tests {
 			assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok());
 
 			// default weight for `TestXt` == encoded length.
-			let extrinsic_weight = len as Weight + <Runtime as frame_system::Config>::BlockWeights
-				::get().get(DispatchClass::Normal).base_extrinsic;
+			let extrinsic_weight = len as Weight +
+				<Runtime as frame_system::Config>::BlockWeights::get()
+					.get(DispatchClass::Normal)
+					.base_extrinsic;
 			assert_eq!(
 				<frame_system::Pallet<Runtime>>::block_weight().total(),
 				base_block_weight + 3 * extrinsic_weight,
@@ -1051,20 +1072,14 @@ mod tests {
 		let mut t = new_test_ext(1);
 		t.execute_with(|| {
 			<pallet_balances::Pallet<Runtime> as LockableCurrency<Balance>>::set_lock(
-				id,
-				&1,
-				110,
-				lock,
-			);
-			let xt = TestXt::new(
-				Call::System(SystemCall::remark(vec![1u8])),
-				sign_extra(1, 0, 0),
+				id, &1, 110, lock,
 			);
-			let weight = xt.get_dispatch_info().weight +
-				<Runtime as frame_system::Config>::BlockWeights
-					::get()
-					.get(DispatchClass::Normal)
-					.base_extrinsic;
+			let xt =
+				TestXt::new(Call::System(SystemCall::remark(vec![1u8])), sign_extra(1, 0, 0));
+			let weight = xt.get_dispatch_info().weight +
+				<Runtime as frame_system::Config>::BlockWeights::get()
+					.get(DispatchClass::Normal)
+					.base_extrinsic;
 			let fee: Balance =
 				<Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&weight);
 			Executive::initialize_block(&Header::new(
@@ -1096,13 +1111,12 @@ mod tests {
 	#[test]
 	fn block_hooks_weight_is_stored() {
 		new_test_ext(1).execute_with(|| {
-
 			Executive::initialize_block(&Header::new_from_number(1));
 			Executive::finalize_block();
 			// NOTE: might need updates over time if new weights are introduced.
 			// For now it only accounts for the base block execution weight and
 			// the `on_initialize` weight defined in the custom test module.
-			assert_eq!(<frame_system::Pallet<Runtime>>::block_weight().total(), 175 + 175 + 10);
+			assert_eq!(<frame_system::Pallet<Runtime>>::block_weight().total(), 175 + 175 + 10);
 		})
 	}
 
@@ -1114,9 +1128,9 @@ mod tests {
 			assert!(frame_system::LastRuntimeUpgrade::<Runtime>::exists());
 			assert!(!Executive::runtime_upgraded());
 
-			RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion {
-				spec_version: 1,
-				..Default::default()
+			RUNTIME_VERSION.with(|v| {
+				*v.borrow_mut() =
+					sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
 			});
 			assert!(Executive::runtime_upgraded());
 			assert_eq!(
@@ -1124,10 +1138,12 @@ mod tests {
 				frame_system::LastRuntimeUpgrade::<Runtime>::get(),
 			);
 
-			RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion {
-				spec_version: 1,
-				spec_name: "test".into(),
-				..Default::default()
+			RUNTIME_VERSION.with(|v| {
+				*v.borrow_mut() = sp_version::RuntimeVersion {
+					spec_version: 1,
+					spec_name: "test".into(),
+					..Default::default()
+				}
 			});
 			assert!(Executive::runtime_upgraded());
 			assert_eq!(
@@ -1135,11 +1151,13 @@ mod tests {
 				frame_system::LastRuntimeUpgrade::<Runtime>::get(),
 			);
 
-			RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion {
-				spec_version: 1,
-				spec_name: "test".into(),
-				impl_version: 2,
-				..Default::default()
+			RUNTIME_VERSION.with(|v| {
+				*v.borrow_mut() = sp_version::RuntimeVersion {
+					spec_version: 1,
+					spec_name: "test".into(),
+					impl_version: 2,
+					..Default::default()
+				}
 			});
 			assert!(!Executive::runtime_upgraded());
 
@@ -1182,9 +1200,9 @@ mod tests {
 	fn custom_runtime_upgrade_is_called_before_modules() {
 		new_test_ext(1).execute_with(|| {
 			// Make sure `on_runtime_upgrade` is called.
-			RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion {
-				spec_version: 1,
-				..Default::default()
+			RUNTIME_VERSION.with(|v| {
+				*v.borrow_mut() =
+					sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
 			});
 
 			Executive::initialize_block(&Header::new(
@@ -1251,9 +1269,9 @@ mod tests {
 	fn all_weights_are_recorded_correctly() {
 		new_test_ext(1).execute_with(|| {
 			// Make sure `on_runtime_upgrade` is called for maximum complexity
-			RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion {
-				spec_version: 1,
-				..Default::default()
+			RUNTIME_VERSION.with(|v| {
+				*v.borrow_mut() =
+					sp_version::RuntimeVersion { spec_version: 1, ..Default::default() }
 			});
 
 			let block_number = 1;
@@ -1270,19 +1288,21 @@ mod tests {
 			let frame_system_upgrade_weight = frame_system::Pallet::<Runtime>::on_runtime_upgrade();
 			let custom_runtime_upgrade_weight = CustomOnRuntimeUpgrade::on_runtime_upgrade();
 			let runtime_upgrade_weight = <AllPallets as OnRuntimeUpgrade>::on_runtime_upgrade();
-			let frame_system_on_initialize_weight = frame_system::Pallet::<Runtime>::on_initialize(block_number);
-			let on_initialize_weight = <AllPallets as OnInitialize<u64>>::on_initialize(block_number);
-			let base_block_weight = <Runtime as frame_system::Config>::BlockWeights::get().base_block;
+			let frame_system_on_initialize_weight =
+				frame_system::Pallet::<Runtime>::on_initialize(block_number);
+			let on_initialize_weight =
+				<AllPallets as OnInitialize<u64>>::on_initialize(block_number);
+			let base_block_weight =
+				<Runtime as frame_system::Config>::BlockWeights::get().base_block;
 
 			// Weights are recorded correctly
 			assert_eq!(
 				frame_system::Pallet::<Runtime>::block_weight().total(),
 				frame_system_upgrade_weight +
-				custom_runtime_upgrade_weight +
-				runtime_upgrade_weight +
-				frame_system_on_initialize_weight +
-				on_initialize_weight +
-				base_block_weight,
+					custom_runtime_upgrade_weight +
+					runtime_upgrade_weight +
+					frame_system_on_initialize_weight +
+					on_initialize_weight + base_block_weight,
 			);
 		});
 	}
@@ -1294,13 +1314,8 @@ mod tests {
 		let mut digest = Digest::default();
 		digest.push(DigestItem::Seal([1, 2, 3, 4], vec![5, 6, 7, 8]));
 
-		let header = Header::new(
-			1,
-			H256::default(),
-			H256::default(),
-			parent_hash,
-			digest.clone(),
-		);
+		let header =
+			Header::new(1, H256::default(), H256::default(), parent_hash, digest.clone());
 
 		Executive::offchain_worker(&header);
diff --git a/substrate/frame/gilt/src/benchmarking.rs b/substrate/frame/gilt/src/benchmarking.rs
index 2ee7bffd9410edd82236d826a12f7487e9f23528..73e1c9a901cb979fbc6504e5840cbbe9b107e927 100644
--- a/substrate/frame/gilt/src/benchmarking.rs
+++ b/substrate/frame/gilt/src/benchmarking.rs
@@ -19,17 +19,21 @@
 
 #![cfg(feature = "runtime-benchmarks")]
 
-use sp_std::prelude::*;
 use super::*;
-use sp_runtime::traits::{Zero, Bounded};
-use sp_arithmetic::Perquintill;
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_support::{
+	dispatch::UnfilteredDispatchable,
+	traits::{Currency, EnsureOrigin, Get},
+};
 use frame_system::RawOrigin;
-use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite};
-use frame_support::{traits::{Currency, Get, EnsureOrigin}, dispatch::UnfilteredDispatchable};
+use sp_arithmetic::Perquintill;
+use sp_runtime::traits::{Bounded, Zero};
+use sp_std::prelude::*;
 
 use crate::Pallet as Gilt;
 
-type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+type BalanceOf<T> =
+	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
 
 benchmarks! {
 	place_bid {
@@ -129,8 +133,4 @@ benchmarks! {
 	}: { Gilt::<T>::pursue_target(q) }
 }
 
-impl_benchmark_test_suite!(
-	Gilt,
-	crate::mock::new_test_ext(),
-	crate::mock::Test,
-);
+impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test,);
diff --git a/substrate/frame/gilt/src/lib.rs b/substrate/frame/gilt/src/lib.rs
index 6956191ecb4daf0bd1b92b29055dc2b5c700368f..3803d78c053173867c15e084d8c0c21c80e22340 100644
--- a/substrate/frame/gilt/src/lib.rs
+++ b/substrate/frame/gilt/src/lib.rs
@@ -67,28 +67,33 @@
 
 pub use pallet::*;
 
+mod benchmarking;
 #[cfg(test)]
 mod mock;
 #[cfg(test)]
 mod tests;
-mod benchmarking;
 pub mod weights;
 
 #[frame_support::pallet]
 pub mod pallet {
-	use sp_std::prelude::*;
-	use sp_arithmetic::{Perquintill, PerThing};
-	use sp_runtime::traits::{Zero, Saturating};
-	use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency};
-	use frame_support::pallet_prelude::*;
-	use frame_system::pallet_prelude::*;
 	pub use crate::weights::WeightInfo;
+	use frame_support::{
+		pallet_prelude::*,
+		traits::{Currency, OnUnbalanced, ReservableCurrency},
+	};
+	use frame_system::pallet_prelude::*;
+	use sp_arithmetic::{PerThing, Perquintill};
+	use sp_runtime::traits::{Saturating, Zero};
+	use sp_std::prelude::*;
 
-	type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-	type PositiveImbalanceOf<T> =
-		<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::PositiveImbalance;
-	type NegativeImbalanceOf<T> =
-		<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance;
+	type BalanceOf<T> =
+		<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+	type PositiveImbalanceOf<T> = <<T as Config>::Currency as Currency<
+		<T as frame_system::Config>::AccountId,
+	>>::PositiveImbalance;
+	type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
+		<T as frame_system::Config>::AccountId,
+	>>::NegativeImbalance;
 
 	#[pallet::config]
 	pub trait Config: frame_system::Config {
@@ -96,13 +101,17 @@ pub mod pallet {
 		type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
 
 		/// Currency type that this works on.
-		type Currency: ReservableCurrency<Self::AccountId, Balance=Self::CurrencyBalance>;
+		type Currency: ReservableCurrency<Self::AccountId, Balance = Self::CurrencyBalance>;
 
 		/// Just the `Currency::Balance` type; we have this item to allow us to constrain it to
 		/// `From<u64>`.
-		type CurrencyBalance:
-			sp_runtime::traits::AtLeast32BitUnsigned + codec::FullCodec + Copy
-			+ MaybeSerializeDeserialize + sp_std::fmt::Debug + Default + From<u64>;
+		type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned
+			+ codec::FullCodec
+			+ Copy
+			+ MaybeSerializeDeserialize
+			+ sp_std::fmt::Debug
+			+ Default
+			+ From<u64>;
 
 		/// Origin required for setting the target proportion to be under gilt.
 		type AdminOrigin: EnsureOrigin<Self::Origin>;
@@ -227,13 +236,8 @@ pub mod pallet {
 	/// The queues of bids ready to become gilts. Indexed by duration (in `Period`s).
 	#[pallet::storage]
-	pub type Queues<T> = StorageMap<
-		_,
-		Blake2_128Concat,
-		u32,
-		Vec<GiltBid<BalanceOf<T>, T::AccountId>>,
-		ValueQuery,
-	>;
+	pub type Queues<T> =
+		StorageMap<_, Blake2_128Concat, u32, Vec<GiltBid<BalanceOf<T>, T::AccountId>>, ValueQuery>;
 
 	/// Information relating to the gilts currently active.
 	#[pallet::storage]
@@ -245,7 +249,11 @@ pub mod pallet {
 		_,
 		Blake2_128Concat,
 		ActiveIndex,
-		ActiveGilt<BalanceOf<T>, <T as frame_system::Config>::AccountId, <T as frame_system::Config>::BlockNumber>,
+		ActiveGilt<
+			BalanceOf<T>,
+			<T as frame_system::Config>::AccountId,
+			<T as frame_system::Config>::BlockNumber,
+		>,
 		OptionQuery,
 	>;
@@ -255,7 +263,7 @@ pub mod pallet {
 
 	#[pallet::genesis_build]
 	impl<T: Config> GenesisBuild<T> for GenesisConfig {
-		fn build(&self) { 
+		fn build(&self) {
 			QueueTotals::<T>::put(vec![(0, BalanceOf::<T>::zero()); T::QueueCount::get() as usize]);
 		}
 	}
@@ -311,7 +319,7 @@ pub mod pallet {
 	}
 
 	#[pallet::call]
-	impl<T: Config> Pallet<T> { 
+	impl<T: Config> Pallet<T> {
 		/// Place a bid for a gilt to be issued.
 		///
 		/// Origin must be Signed, and account must have at least `amount` in free balance.
@@ -335,35 +343,35 @@ pub mod pallet {
 			ensure!(amount >= T::MinFreeze::get(), Error::<T>::AmountTooSmall);
 
 			let queue_count = T::QueueCount::get() as usize;
-			let queue_index = duration.checked_sub(1)
-				.ok_or(Error::<T>::DurationTooSmall)? as usize;
+			let queue_index = duration.checked_sub(1).ok_or(Error::<T>::DurationTooSmall)? as usize;
 			ensure!(queue_index < queue_count, Error::<T>::DurationTooBig);
 
-			let net = Queues::<T>::try_mutate(duration, |q|
-				-> Result<(u32, BalanceOf::<T>), DispatchError>
-			{
-				let queue_full = q.len() == T::MaxQueueLen::get() as usize;
-				ensure!(!queue_full || q[0].amount < amount, Error::<T>::BidTooLow);
-				T::Currency::reserve(&who, amount)?;
-
-				// queue is <Ordered: Lowest ... Highest><Fifo: Last ... First>
-				let mut bid = GiltBid { amount, who: who.clone() };
-				let net = if queue_full {
-					sp_std::mem::swap(&mut q[0], &mut bid);
-					T::Currency::unreserve(&bid.who, bid.amount);
-					(0, amount - bid.amount)
-				} else {
-					q.insert(0, bid);
-					(1, amount)
-				};
-
-				let sorted_item_count = q.len().saturating_sub(T::FifoQueueLen::get() as usize);
-				if sorted_item_count > 1 {
-					q[0..sorted_item_count].sort_by_key(|x| x.amount);
-				}
+			let net = Queues::<T>::try_mutate(
+				duration,
+				|q| -> Result<(u32, BalanceOf<T>), DispatchError> {
+					let queue_full = q.len() == T::MaxQueueLen::get() as usize;
+					ensure!(!queue_full || q[0].amount < amount, Error::<T>::BidTooLow);
+					T::Currency::reserve(&who, amount)?;
+
+					// queue is <Ordered: Lowest ... Highest><Fifo: Last ... First>
+					let mut bid = GiltBid { amount, who: who.clone() };
+					let net = if queue_full {
+						sp_std::mem::swap(&mut q[0], &mut bid);
+						T::Currency::unreserve(&bid.who, bid.amount);
+						(0, amount - bid.amount)
+					} else {
+						q.insert(0, bid);
+						(1, amount)
+					};
+
+					let sorted_item_count = q.len().saturating_sub(T::FifoQueueLen::get() as usize);
+					if sorted_item_count > 1 {
+						q[0..sorted_item_count].sort_by_key(|x| x.amount);
+					}
 
-				Ok(net)
-			})?;
+					Ok(net)
+				},
+			)?;
 			QueueTotals::<T>::mutate(|qs| {
 				qs.resize(queue_count, (0, Zero::zero()));
 				qs[queue_index].0 += net.0;
@@ -390,8 +398,7 @@ pub mod pallet {
 			let who = ensure_signed(origin)?;
 
 			let queue_count = T::QueueCount::get() as usize;
-			let queue_index = duration.checked_sub(1)
-				.ok_or(Error::<T>::DurationTooSmall)? as usize;
+			let queue_index = duration.checked_sub(1).ok_or(Error::<T>::DurationTooSmall)? as usize;
 			ensure!(queue_index < queue_count, Error::<T>::DurationTooBig);
 
 			let bid = GiltBid { amount, who };
@@ -453,11 +460,12 @@ pub mod pallet {
 			Active::<T>::remove(index);
 
 			// Multiply the proportion it is by the total issued.
-			let total_issuance = T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
+			let total_issuance =
+				T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
 			ActiveTotal::<T>::mutate(|totals| {
 				let nongilt_issuance = total_issuance.saturating_sub(totals.frozen);
-				let effective_issuance = totals.proportion.left_from_one()
-					.saturating_reciprocal_mul(nongilt_issuance);
+				let effective_issuance =
+					totals.proportion.left_from_one().saturating_reciprocal_mul(nongilt_issuance);
 				let gilt_value = gilt.proportion * effective_issuance;
 
 				totals.frozen = totals.frozen.saturating_sub(gilt.amount);
@@ -518,14 +526,9 @@ pub mod pallet {
 			let total_issuance = T::Currency::total_issuance();
 			let non_gilt = total_issuance.saturating_sub(totals.frozen);
-			let effective = totals.proportion.left_from_one()
-				.saturating_reciprocal_mul(non_gilt);
+			let effective = totals.proportion.left_from_one().saturating_reciprocal_mul(non_gilt);
 
-			IssuanceInfo {
-				reserved: totals.frozen,
-				non_gilt,
-				effective,
-			}
+			IssuanceInfo { reserved: totals.frozen, non_gilt, effective }
 		}
 
 		/// Attempt to enlarge our gilt-set from bids in order to satisfy our desired target amount
@@ -535,16 +538,17 @@ pub mod pallet {
 			if totals.proportion < totals.target {
 				let missing = totals.target.saturating_sub(totals.proportion);
 
-				let total_issuance = T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
+				let total_issuance =
+					T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
 				let nongilt_issuance = total_issuance.saturating_sub(totals.frozen);
-				let effective_issuance = totals.proportion.left_from_one()
-					.saturating_reciprocal_mul(nongilt_issuance);
+				let effective_issuance =
+					totals.proportion.left_from_one().saturating_reciprocal_mul(nongilt_issuance);
 				let intake = missing * effective_issuance;
 
 				let (bids_taken, queues_hit) = Self::enlarge(intake, max_bids);
 				let first_from_each_queue = T::WeightInfo::pursue_target_per_queue(queues_hit);
 				let rest_from_each_queue = T::WeightInfo::pursue_target_per_item(bids_taken)
-					.saturating_sub(T::WeightInfo::pursue_target_per_item(queues_hit));
+					.saturating_sub(T::WeightInfo::pursue_target_per_item(queues_hit));
 
 				first_from_each_queue + rest_from_each_queue
 			} else {
 				T::WeightInfo::pursue_target_noop()
@@ -555,11 +559,9 @@ pub mod pallet {
 		/// from the queue.
 		///
 		/// Return the number of bids taken and the number of distinct queues taken from.
-		pub fn enlarge(
-			amount: BalanceOf<T>,
-			max_bids: u32,
-		) -> (u32, u32) {
-			let total_issuance = T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
+		pub fn enlarge(amount: BalanceOf<T>, max_bids: u32) -> (u32, u32) {
+			let total_issuance =
+				T::Currency::total_issuance().saturating_sub(T::IgnoredIssuance::get());
 			let mut remaining = amount;
 			let mut bids_taken = 0;
 			let mut queues_hit = 0;
@@ -572,7 +574,8 @@ pub mod pallet {
 					continue
 				}
 				let queue_index = duration as usize - 1;
-				let expiry = now.saturating_add(T::Period::get().saturating_mul(duration.into()));
+				let expiry =
+					now.saturating_add(T::Period::get().saturating_mul(duration.into()));
 				Queues::<T>::mutate(duration, |q| {
 					while let Some(mut bid) = q.pop() {
 						if remaining < bid.amount {
@@ -589,7 +592,9 @@ pub mod pallet {
 						// Now to activate the bid...
 						let nongilt_issuance = total_issuance.saturating_sub(totals.frozen);
-						let effective_issuance = totals.proportion.left_from_one()
+						let effective_issuance = totals
+							.proportion
+							.left_from_one()
 							.saturating_reciprocal_mul(nongilt_issuance);
 						let n = amount;
 						let d = effective_issuance;
@@ -607,7 +612,7 @@ pub mod pallet {
 						bids_taken += 1;
 
 						if remaining.is_zero() || bids_taken == max_bids {
-							break;
+							break
 						}
 					}
 					queues_hit += 1;
diff --git a/substrate/frame/gilt/src/mock.rs b/substrate/frame/gilt/src/mock.rs
index aeff70610d4bb7be87bcbb532446148b912b2b15..91606f185231314a1b670b7452891a925a893763 100644
--- a/substrate/frame/gilt/src/mock.rs
+++ b/substrate/frame/gilt/src/mock.rs
@@ -20,11 +20,14 @@
 use crate as pallet_gilt;
 
 use frame_support::{
-	parameter_types, ord_parameter_types,
-	traits::{OnInitialize, OnFinalize, GenesisBuild, Currency},
+	ord_parameter_types, parameter_types,
+	traits::{Currency, GenesisBuild, OnFinalize, OnInitialize},
 };
 use sp_core::H256;
-use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header};
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+};
 
 type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
 type Block = frame_system::mocking::MockBlock<Test>;
@@ -125,9 +128,11 @@ impl pallet_gilt::Config for Test {
 // our desired mockup.
 pub fn new_test_ext() -> sp_io::TestExternalities {
 	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
-	pallet_balances::GenesisConfig::<Test>{
+	pallet_balances::GenesisConfig::<Test> {
 		balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)],
-	}.assimilate_storage(&mut t).unwrap();
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
 	GenesisBuild::<Test>::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap();
 	t.into()
 }
diff --git a/substrate/frame/gilt/src/tests.rs b/substrate/frame/gilt/src/tests.rs
index 2f328ba904bbe9f95394b099c14ca3fc7f2533b5..80315141e2325f542c4818594fd2ecdf69c91e66 100644
--- a/substrate/frame/gilt/src/tests.rs
+++ b/substrate/frame/gilt/src/tests.rs
@@ -18,10 +18,10 @@
 //! Tests for Gilt pallet.
 
 use super::*;
-use crate::{Error, mock::*};
-use frame_support::{assert_ok, assert_noop, dispatch::DispatchError, traits::Currency};
-use sp_arithmetic::Perquintill;
+use crate::{mock::*, Error};
+use frame_support::{assert_noop, assert_ok, dispatch::DispatchError, traits::Currency};
 use pallet_balances::Error as BalancesError;
+use sp_arithmetic::Perquintill;
 
 #[test]
 fn basic_setup_works() {
@@ -31,12 +31,15 @@ fn basic_setup_works() {
 		for q in 0..3 {
 			assert!(Queues::<Test>::get(q).is_empty());
 		}
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 0,
-			proportion: Perquintill::zero(),
-			index: 0,
-			target: Perquintill::zero(),
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 0,
+				proportion: Perquintill::zero(),
+				index: 0,
+				target: Perquintill::zero(),
+			}
+		);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(0, 0); 3]);
 	});
 }
@@ -49,12 +52,15 @@ fn set_target_works() {
 		assert_noop!(Gilt::set_target(Origin::signed(2), Perquintill::from_percent(50)), e);
 		assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(50)));
 
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 0,
-			proportion: Perquintill::zero(),
-			index: 0,
-			target: Perquintill::from_percent(50),
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 0,
+				proportion: Perquintill::zero(),
+				index: 0,
+				target: Perquintill::from_percent(50),
+			}
+		);
 	});
 }
 
@@ -63,7 +69,10 @@ fn place_bid_works() {
 	new_test_ext().execute_with(|| {
 		run_to_block(1);
 		assert_noop!(Gilt::place_bid(Origin::signed(1), 1, 2), Error::<Test>::AmountTooSmall);
-		assert_noop!(Gilt::place_bid(Origin::signed(1), 101, 2), BalancesError::<Test>::InsufficientBalance);
+		assert_noop!(
+			Gilt::place_bid(Origin::signed(1), 101, 2),
+			BalancesError::<Test>::InsufficientBalance
+		);
 		assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 4), Error::<Test>::DurationTooBig);
 		assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2));
 		assert_eq!(Balances::reserved_balance(1), 10);
@@ -86,11 +95,14 @@ fn place_bid_queuing_works() {
 		assert_ok!(Gilt::place_bid(Origin::signed(1), 25, 2));
 		assert_eq!(Balances::reserved_balance(1), 60);
 		assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 2), Error::<Test>::BidTooLow);
-		assert_eq!(Queues::<Test>::get(2), vec![
-			GiltBid { amount: 15, who: 1 },
-			GiltBid { amount: 25, who: 1 },
-			GiltBid { amount: 20, who: 1 },
-		]);
+		assert_eq!(
+			Queues::<Test>::get(2),
+			vec![
+				GiltBid { amount: 15, who: 1 },
+				GiltBid { amount: 25, who: 1 },
+				GiltBid { amount: 20, who: 1 },
+			]
+		);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(0, 0), (3, 60), (0, 0)]);
 	});
 }
@@ -119,17 +131,16 @@ fn multiple_place_bids_works() {
 		assert_eq!(Balances::reserved_balance(1), 40);
 		assert_eq!(Balances::reserved_balance(2), 10);
-		assert_eq!(Queues::<Test>::get(1), vec![
-			GiltBid { amount: 10, who: 1 },
-		]);
-		assert_eq!(Queues::<Test>::get(2), vec![
-			GiltBid { amount: 10, who: 2 },
-			GiltBid { amount: 10, who: 1 },
-			GiltBid { amount: 10, who: 1 },
-		]);
-		assert_eq!(Queues::<Test>::get(3), vec![
-			GiltBid { amount: 10, who: 1 },
-		]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 10, who: 1 },]);
+		assert_eq!(
+			Queues::<Test>::get(2),
+			vec![
+				GiltBid { amount: 10, who: 2 },
+				GiltBid { amount: 10, who: 1 },
+				GiltBid { amount: 10, who: 1 },
+			]
+		);
+		assert_eq!(Queues::<Test>::get(3), vec![GiltBid { amount: 10, who: 1 },]);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 10), (3, 30), (1, 10)]);
 	});
 }
@@ -144,7 +155,7 @@ fn retract_single_item_queue_works() {
 		assert_eq!(Balances::reserved_balance(1), 10);
 		assert_eq!(Queues::<Test>::get(1), vec![]);
-		assert_eq!(Queues::<Test>::get(2), vec![ GiltBid { amount: 10, who: 1 } ]);
+		assert_eq!(Queues::<Test>::get(2), vec![GiltBid { amount: 10, who: 1 }]);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(0, 0), (1, 10), (0, 0)]);
 	});
 }
@@ -161,13 +172,11 @@ fn retract_with_other_and_duplicate_works() {
 		assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 2));
 		assert_eq!(Balances::reserved_balance(1), 20);
 		assert_eq!(Balances::reserved_balance(2), 10);
-		assert_eq!(Queues::<Test>::get(1), vec![
-			GiltBid { amount: 10, who: 1 },
-		]);
-		assert_eq!(Queues::<Test>::get(2), vec![
-			GiltBid { amount: 10, who: 2 },
-			GiltBid { amount: 10, who: 1 },
-		]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 10, who: 1 },]);
+		assert_eq!(
+			Queues::<Test>::get(2),
+			vec![GiltBid { amount: 10, who: 2 }, GiltBid { amount: 10, who: 1 },]
+		);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 10), (2, 20), (0, 0)]);
 	});
 }
@@ -195,22 +204,23 @@ fn basic_enlarge_works() {
 		// Takes 2/2, then stopped because it reaches its max amount
 		assert_eq!(Balances::reserved_balance(1), 40);
 		assert_eq!(Balances::reserved_balance(2), 40);
-		assert_eq!(Queues::<Test>::get(1), vec![ GiltBid { amount: 40, who: 1 } ]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 40, who: 1 }]);
 		assert_eq!(Queues::<Test>::get(2), vec![]);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (0, 0), (0, 0)]);
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 40,
-			proportion: Perquintill::from_percent(10),
-			index: 1,
-			target: Perquintill::zero(),
-		});
-		assert_eq!(Active::<Test>::get(0).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 2,
-			expiry: 7,
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 40,
+				proportion: Perquintill::from_percent(10),
+				index: 1,
+				target: Perquintill::zero(),
+			}
+		);
+		assert_eq!(
+			Active::<Test>::get(0).unwrap(),
+			ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 2, expiry: 7 }
+		);
 	});
 }
 
@@ -225,29 +235,33 @@ fn enlarge_respects_bids_limit() {
 		Gilt::enlarge(100, 2);
 		// Should have taken 4/3 and 2/2, then stopped because it's only allowed 2.
-		assert_eq!(Queues::<Test>::get(1), vec![ GiltBid { amount: 40, who: 1 } ]);
-		assert_eq!(Queues::<Test>::get(2), vec![ GiltBid { amount: 40, who: 3 } ]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 40, who: 1 }]);
+		assert_eq!(Queues::<Test>::get(2), vec![GiltBid { amount: 40, who: 3 }]);
 		assert_eq!(Queues::<Test>::get(3), vec![]);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (1, 40), (0, 0)]);
-		assert_eq!(Active::<Test>::get(0).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 4,
-			expiry: 10,
-		});
-		assert_eq!(Active::<Test>::get(1).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 2,
-			expiry: 7,
-		});
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 80,
-			proportion: Perquintill::from_percent(20),
-			index: 2,
-			target: Perquintill::zero(),
-		});
+		assert_eq!(
+			Active::<Test>::get(0).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 4,
+				expiry: 10,
+			}
+		);
+		assert_eq!(
+			Active::<Test>::get(1).unwrap(),
+			ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 2, expiry: 7 }
+		);
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 80,
+				proportion: Perquintill::from_percent(20),
+				index: 2,
+				target: Perquintill::zero(),
+			}
+		);
 	});
 }
 
@@ -259,21 +273,22 @@ fn enlarge_respects_amount_limit_and_will_split() {
 		Gilt::enlarge(40, 2);
 
 		// Takes 2/2, then stopped because it reaches its max amount
-		assert_eq!(Queues::<Test>::get(1), vec![ GiltBid { amount: 40, who: 1 } ]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 40, who: 1 }]);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (0, 0), (0, 0)]);
-		assert_eq!(Active::<Test>::get(0).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 1,
-			expiry: 4,
-		});
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 40,
-			proportion: Perquintill::from_percent(10),
-			index: 1,
-			target: Perquintill::zero(),
-		});
+		assert_eq!(
+			Active::<Test>::get(0).unwrap(),
+			ActiveGilt { proportion: Perquintill::from_percent(10), amount: 40, who: 1, expiry: 4 }
+		);
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 40,
+				proportion: Perquintill::from_percent(10),
+				index: 1,
+				target: Perquintill::zero(),
+			}
+		);
 	});
 }
 
@@ -290,12 +305,15 @@ fn basic_thaw_works() {
 		assert_noop!(Gilt::thaw(Origin::signed(2), 0), Error::<Test>::NotOwner);
 		assert_ok!(Gilt::thaw(Origin::signed(1), 0));
 
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 0,
-			proportion: Perquintill::zero(),
-			index: 1,
-			target: Perquintill::zero(),
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 0,
+				proportion: Perquintill::zero(),
+				index: 1,
+				target: Perquintill::zero(),
+			}
+		);
 		assert_eq!(Active::<Test>::get(0), None);
 		assert_eq!(Balances::free_balance(1), 100);
 		assert_eq!(Balances::reserved_balance(1), 0);
@@ -426,98 +444,124 @@ fn enlargement_to_target_works() {
 		assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(40)));
 		run_to_block(3);
 
-		assert_eq!(Queues::<Test>::get(1), vec![
-			GiltBid { amount: 40, who: 1 },
-		]);
-		assert_eq!(Queues::<Test>::get(2), vec![
-			GiltBid { amount: 40, who: 2 },
-			GiltBid { amount: 40, who: 1 },
-		]);
-		assert_eq!(Queues::<Test>::get(3), vec![
-			GiltBid { amount: 40, who: 3 },
-			GiltBid { amount: 40, who: 2 },
-		]);
+		assert_eq!(Queues::<Test>::get(1), vec![GiltBid { amount: 40, who: 1 },]);
+		assert_eq!(
+			Queues::<Test>::get(2),
+			vec![GiltBid { amount: 40, who: 2 }, GiltBid { amount: 40, who: 1 },]
+		);
+		assert_eq!(
+			Queues::<Test>::get(3),
+			vec![GiltBid { amount: 40, who: 3 }, GiltBid { amount: 40, who: 2 },]
+		);
 		assert_eq!(QueueTotals::<Test>::get(), vec![(1, 40), (2, 80), (2, 80)]);
 
 		run_to_block(4);
 		// Two new gilts should have been issued to 2 & 3 for 40 each & duration of 3.
-		assert_eq!(Active::<Test>::get(0).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 2,
-			expiry: 13,
-		});
-		assert_eq!(Active::<Test>::get(1).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 3,
-			expiry: 13,
-
-		});
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 80,
-			proportion: Perquintill::from_percent(20),
-			index: 2,
-			target: Perquintill::from_percent(40),
-		});
+		assert_eq!(
+			Active::<Test>::get(0).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 2,
+				expiry: 13,
+			}
+		);
+		assert_eq!(
+			Active::<Test>::get(1).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 3,
+				expiry: 13,
+			}
+		);
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 80,
+				proportion: Perquintill::from_percent(20),
+				index: 2,
+				target: Perquintill::from_percent(40),
+			}
+		);
 
 		run_to_block(5);
 		// No change
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 80,
-			proportion: Perquintill::from_percent(20),
-			index: 2,
-			target: Perquintill::from_percent(40),
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 80,
+				proportion: Perquintill::from_percent(20),
+				index: 2,
+				target: Perquintill::from_percent(40),
+			}
+		);
 
 		run_to_block(6);
 		// Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2.
-		assert_eq!(Active::<Test>::get(2).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 1,
-			expiry: 12,
-		});
-		assert_eq!(Active::<Test>::get(3).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 2,
-			expiry: 12,
-
-		});
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 160,
-			proportion: Perquintill::from_percent(40),
-			index: 4,
-			target: Perquintill::from_percent(40),
-		});
+		assert_eq!(
+			Active::<Test>::get(2).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 1,
+				expiry: 12,
+			}
+		);
+		assert_eq!(
+			Active::<Test>::get(3).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 2,
+				expiry: 12,
+			}
+		);
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 160,
+				proportion: Perquintill::from_percent(40),
+				index: 4,
+				target: Perquintill::from_percent(40),
+			}
+		);
 
 		run_to_block(8);
 		// No change now.
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 160,
-			proportion: Perquintill::from_percent(40),
-			index: 4,
-			target: Perquintill::from_percent(40),
-		});
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 160,
+				proportion: Perquintill::from_percent(40),
+				index: 4,
+				target: Perquintill::from_percent(40),
+			}
+		);
 
 		// Set target a bit higher to use up the remaining bid.
 		assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(60)));
 		run_to_block(10);
 		// Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2.
-		assert_eq!(Active::<Test>::get(4).unwrap(), ActiveGilt {
-			proportion: Perquintill::from_percent(10),
-			amount: 40,
-			who: 1,
-			expiry: 13,
-		});
-
-		assert_eq!(ActiveTotal::<Test>::get(), ActiveGiltsTotal {
-			frozen: 200,
-			proportion: Perquintill::from_percent(50),
-			index: 5,
-			target: Perquintill::from_percent(60),
-		});
+		assert_eq!(
+			Active::<Test>::get(4).unwrap(),
+			ActiveGilt {
+				proportion: Perquintill::from_percent(10),
+				amount: 40,
+				who: 1,
+				expiry: 13,
+			}
+		);
+
+		assert_eq!(
+			ActiveTotal::<Test>::get(),
+			ActiveGiltsTotal {
+				frozen: 200,
+				proportion: Perquintill::from_percent(50),
+				index: 5,
+				target: Perquintill::from_percent(60),
+			}
+		);
 	});
 }
diff --git a/substrate/frame/gilt/src/weights.rs b/substrate/frame/gilt/src/weights.rs
index c9e16c041874c4be85ff39cb88140cf0c69345c7..7a12687260a7ce4b0403fc7661e55a811a905363 100644
--- a/substrate/frame/gilt/src/weights.rs
+++ b/substrate/frame/gilt/src/weights.rs
@@ -36,6 +36,7 @@
 // --template=./.maintain/frame-weight-template.hbs
 
+#![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
 #![allow(unused_imports)]
diff --git a/substrate/frame/grandpa/src/benchmarking.rs b/substrate/frame/grandpa/src/benchmarking.rs
index 1bd65944f0a35b8101041e64e7252017d5481af5..d5372c5687a4b9ba62d50e7ab4a994de28496325 100644
--- a/substrate/frame/grandpa/src/benchmarking.rs
+++ b/substrate/frame/grandpa/src/benchmarking.rs
@@ -19,7 +19,7 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use super::{*, Pallet as Grandpa};
+use super::{Pallet as Grandpa, *};
 use frame_benchmarking::benchmarks;
 use frame_system::RawOrigin;
 use sp_core::H256;
@@ -106,10 +106,7 @@ mod tests {
 			);
 
 			println!("equivocation_proof: {:?}", equivocation_proof);
-			println!(
-				"equivocation_proof.encode(): {:?}",
-				equivocation_proof.encode()
-			);
+			println!("equivocation_proof.encode(): {:?}", equivocation_proof.encode());
 		});
 	}
 }
diff --git a/substrate/frame/grandpa/src/default_weights.rs b/substrate/frame/grandpa/src/default_weights.rs
index 63122fcf4b5388f97536e0a6328c67361e2637d4..edc18a7ff8c93c560ab7092320c41ed4fb074e33 100644
--- a/substrate/frame/grandpa/src/default_weights.rs
+++ b/substrate/frame/grandpa/src/default_weights.rs
@@ -19,7 +19,8 @@
 //! This file was not auto-generated.
 
 use frame_support::weights::{
-	Weight, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS, RocksDbWeight as DbWeight},
+	constants::{RocksDbWeight as DbWeight, WEIGHT_PER_MICROS, WEIGHT_PER_NANOS},
+	Weight,
 };
 
 impl crate::WeightInfo for () {
@@ -48,7 +49,6 @@ impl crate::WeightInfo for () {
 	}
 
 	fn note_stalled() -> Weight {
-		(3 * WEIGHT_PER_MICROS)
-			.saturating_add(DbWeight::get().writes(1))
+		(3 * WEIGHT_PER_MICROS).saturating_add(DbWeight::get().writes(1))
 	}
 }
diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs
index 0383d2d9a9be66d20ea6b760e19a36b9664653c9..2ef106817c3e1709c3fc9ca6eafbcf3b64fea46b 100644
--- a/substrate/frame/grandpa/src/equivocation.rs
+++ b/substrate/frame/grandpa/src/equivocation.rs
@@ -15,7 +15,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//!
 //! An opt-in utility module for reporting equivocations.
 //!
 //! This module defines an offence type for GRANDPA equivocations
@@ -35,7 +34,6 @@
 //! When using this module for enabling equivocation reporting it is required
 //! that the `ValidateUnsigned` for the GRANDPA pallet is used in the runtime
 //! definition.
-//!
 
 use sp_std::prelude::*;
 
@@ -54,7 +52,7 @@ use sp_staking::{
 	SessionIndex,
 };
 
-use super::{Call, Pallet, Config};
+use super::{Call, Config, Pallet};
 
 /// A trait with utility methods for handling equivocation reports in GRANDPA.
 /// The offence type is generic, and the trait provides , reporting an offence
@@ -130,9 +128,7 @@ pub struct EquivocationHandler<I, R, L, O = GrandpaEquivocationOffence<I>> {
 
 impl<I, R, L, O> Default for EquivocationHandler<I, R, L, O> {
 	fn default() -> Self {
-		Self {
-			_phantom: Default::default(),
-		}
+		Self { _phantom: Default::default() }
 	}
 }
@@ -209,21 +205,22 @@ impl<T: Config> Pallet<T> {
 		if let Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof) = call {
 			// discard equivocation report not coming from the local node
 			match source {
-				TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }
+				TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ },
 				_ => {
 					log::warn!(
 						target: "runtime::afg",
 						"rejecting unsigned report equivocation transaction because it is not local/in-block."
 					);
 
-					return InvalidTransaction::Call.into();
-				}
+					return InvalidTransaction::Call.into()
+				},
 			}
 
 			// check report staleness
 			is_known_offence::<T>(equivocation_proof, key_owner_proof)?;
 
-			let longevity = <T::HandleEquivocation as HandleEquivocation<T>>::ReportLongevity::get();
+			let longevity =
+				<T::HandleEquivocation as HandleEquivocation<T>>::ReportLongevity::get();
 
 			ValidTransaction::with_tag_prefix("GrandpaEquivocation")
 				// We assign the maximum priority for any equivocation report.
@@ -257,10 +254,7 @@ fn is_known_offence<T: Config>(
 	key_owner_proof: &T::KeyOwnerProof,
 ) -> Result<(), TransactionValidityError> {
 	// check the membership proof to extract the offender's id
-	let key = (
-		sp_finality_grandpa::KEY_TYPE,
-		equivocation_proof.offender().clone(),
-	);
+	let key = (sp_finality_grandpa::KEY_TYPE, equivocation_proof.offender().clone());
 
 	let offender = T::KeyOwnerProofSystem::check_proof(key, key_owner_proof.clone())
 		.ok_or(InvalidTransaction::BadProof)?;
diff --git a/substrate/frame/grandpa/src/lib.rs b/substrate/frame/grandpa/src/lib.rs
index 2d10e3c96b14daf3de27f427f256e3797089b07e..184ab4960874778c010ad95cd14506991e796138 100644
--- a/substrate/frame/grandpa/src/lib.rs
+++ b/substrate/frame/grandpa/src/lib.rs
@@ -41,18 +41,16 @@ use fg_primitives::{
 };
 use frame_support::{
 	dispatch::DispatchResultWithPostInfo,
-	storage, traits::{OneSessionHandler, KeyOwnerProofSystem},
-	weights::{Pays, Weight},
-};
-use sp_runtime::{
-	generic::DigestItem,
-	traits::Zero,
-	DispatchResult, KeyTypeId,
+	storage,
+	traits::{KeyOwnerProofSystem, OneSessionHandler},
+	weights::{Pays, Weight},
 };
+use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult, KeyTypeId};
 use sp_session::{GetSessionNumber, GetValidatorCount};
 use sp_staking::SessionIndex;
 
-mod equivocation;
 mod default_weights;
+mod equivocation;
 pub mod migrations;
 
 #[cfg(any(feature = "runtime-benchmarks", test))]
@@ -71,9 +69,9 @@ pub use pallet::*;
 
 #[frame_support::pallet]
 pub mod pallet {
+	use super::*;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
-	use super::*;
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
@@ -130,24 +128,20 @@ pub mod pallet {
 					ScheduledChange {
 						delay: pending_change.delay,
 						next_authorities: pending_change.next_authorities.clone(),
-					}
+					},
 				))
 			} else {
-				Self::deposit_log(ConsensusLog::ScheduledChange(
-					ScheduledChange {
-						delay: pending_change.delay,
-						next_authorities: pending_change.next_authorities.clone(),
-					}
-				));
+				Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange {
+					delay: pending_change.delay,
+					next_authorities: pending_change.next_authorities.clone(),
})); } } // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event( - Event::NewAuthorities(pending_change.next_authorities) - ); + Self::deposit_event(Event::NewAuthorities(pending_change.next_authorities)); >::kill(); } } @@ -197,11 +191,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let reporter = ensure_signed(origin)?; - Self::do_report_equivocation( - Some(reporter), - equivocation_proof, - key_owner_proof, - ) + Self::do_report_equivocation(Some(reporter), equivocation_proof, key_owner_proof) } /// Report voter equivocation/misbehavior. This method will verify the @@ -289,7 +279,8 @@ pub mod pallet { /// State of the current authority set. #[pallet::storage] #[pallet::getter(fn state)] - pub(super) type State = StorageValue<_, StoredState, ValueQuery, DefaultForState>; + pub(super) type State = + StorageValue<_, StoredState, ValueQuery, DefaultForState>; /// Pending change: (signaled at, scheduled change). #[pallet::storage] @@ -328,9 +319,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { - authorities: Default::default(), - } + Self { authorities: Default::default() } } } @@ -388,7 +377,7 @@ pub enum StoredState { /// Block at which the intention to pause was scheduled. scheduled_at: N, /// Number of blocks after which the change will be enacted. - delay: N + delay: N, }, /// The current GRANDPA authority set is paused. Paused, @@ -410,10 +399,7 @@ impl Pallet { /// Set the current set of authorities, along with their respective weights. fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put( - GRANDPA_AUTHORITIES_KEY, - &VersionedAuthorityList::from(authorities), - ); + storage::unhashed::put(GRANDPA_AUTHORITIES_KEY, &VersionedAuthorityList::from(authorities)); } /// Schedule GRANDPA to pause starting in the given number of blocks. @@ -421,10 +407,7 @@ impl Pallet { pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Live = >::get() { let scheduled_at = >::block_number(); - >::put(StoredState::PendingPause { - delay: in_blocks, - scheduled_at, - }); + >::put(StoredState::PendingPause { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -436,10 +419,7 @@ impl Pallet { pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { if let StoredState::Paused = >::get() { let scheduled_at = >::block_number(); - >::put(StoredState::PendingResume { - delay: in_blocks, - scheduled_at, - }); + >::put(StoredState::PendingResume { delay: in_blocks, scheduled_at }); Ok(()) } else { @@ -502,10 +482,7 @@ impl Pallet { // config builder or through `on_genesis_session`. fn initialize(authorities: &AuthorityList) { if !authorities.is_empty() { - assert!( - Self::grandpa_authorities().is_empty(), - "Authorities are already initialized!" - ); + assert!(Self::grandpa_authorities().is_empty(), "Authorities are already initialized!"); Self::set_grandpa_authorities(authorities); } @@ -530,16 +507,16 @@ impl Pallet { let validator_count = key_owner_proof.validator_count(); // validate the key ownership proof extracting the id of the offender. 
- let offender = - T::KeyOwnerProofSystem::check_proof( - (fg_primitives::KEY_TYPE, equivocation_proof.offender().clone()), - key_owner_proof, - ).ok_or(Error::::InvalidKeyOwnershipProof)?; + let offender = T::KeyOwnerProofSystem::check_proof( + (fg_primitives::KEY_TYPE, equivocation_proof.offender().clone()), + key_owner_proof, + ) + .ok_or(Error::::InvalidKeyOwnershipProof)?; // validate equivocation proof (check votes are different and // signatures are valid). if !sp_finality_grandpa::check_equivocation_proof(equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // fetch the current and previous sets last session index. on the @@ -547,8 +524,8 @@ impl Pallet { let previous_set_id_session_index = if set_id == 0 { None } else { - let session_index = - Self::session_for_set(set_id - 1).ok_or_else(|| Error::::InvalidEquivocationProof)?; + let session_index = Self::session_for_set(set_id - 1) + .ok_or_else(|| Error::::InvalidEquivocationProof)?; Some(session_index) }; @@ -560,10 +537,10 @@ impl Pallet { // bounds of the set id reported in the equivocation. if session_index > set_id_session_index || previous_set_id_session_index - .map(|previous_index| session_index <= previous_index) - .unwrap_or(false) + .map(|previous_index| session_index <= previous_index) + .unwrap_or(false) { - return Err(Error::::InvalidEquivocationProof.into()); + return Err(Error::::InvalidEquivocationProof.into()) } // report to the offences module rewarding the sender. @@ -576,7 +553,8 @@ impl Pallet { set_id, round, ), - ).map_err(|_| Error::::DuplicateOffenceReport)?; + ) + .map_err(|_| Error::::DuplicateOffenceReport)?; // waive the fee since the report is valid and beneficial Ok(Pays::No.into()) @@ -610,19 +588,22 @@ impl sp_runtime::BoundToRuntimeAppPublic for Pallet { } impl OneSessionHandler for Pallet - where T: pallet_session::Config +where + T: pallet_session::Config, { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); Self::initialize(&authorities); } fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where + I: Iterator, { // Always issue a change if `session` says that the validators have changed. // Even if their session keys are the same as before, the underlying economic diff --git a/substrate/frame/grandpa/src/migrations/v3_1.rs b/substrate/frame/grandpa/src/migrations/v3_1.rs index fc626578098dade755b88a48f20cc61932d43792..c2ab9d3b7f6650d04a5ec3e1d42ccdbd6219684b 100644 --- a/substrate/frame/grandpa/src/migrations/v3_1.rs +++ b/substrate/frame/grandpa/src/migrations/v3_1.rs @@ -16,8 +16,8 @@ // limitations under the License. use frame_support::{ + traits::{Get, GetPalletVersion, PalletVersion}, weights::Weight, - traits::{GetPalletVersion, PalletVersion, Get}, }; use sp_io::hashing::twox_128; @@ -31,18 +31,15 @@ pub const OLD_PREFIX: &[u8] = b"GrandpaFinality"; /// `::PalletInfo::name::`. /// /// The old storage prefix, `GrandpaFinality` is hardcoded in the migration code. -pub fn migrate< - T: frame_system::Config, - P: GetPalletVersion, - N: AsRef, ->(new_pallet_name: N) -> Weight { - +pub fn migrate>( + new_pallet_name: N, +) -> Weight { if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX { log::info!( target: "runtime::afg", "New pallet name is equal to the old prefix. 
No migration needs to be done.", ); - return 0; + return 0 } let maybe_storage_version =

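The `migrate` function above moves every storage key of the pallet off the hardcoded `GrandpaFinality` prefix onto the new pallet name. A sketch of the prefix derivation it relies on, using the `twox_128` import shown in the hunk; the new name `Grandpa` is an assumption for illustration:

use sp_io::hashing::twox_128;

// Every pallet's keys live under the 16-byte twox128 hash of its name,
// so renaming a pallet means physically moving keys between prefixes.
fn pallet_prefix(pallet_name: &[u8]) -> [u8; 16] {
    twox_128(pallet_name)
}

fn main() {
    let old = pallet_prefix(b"GrandpaFinality"); // OLD_PREFIX above
    let new = pallet_prefix(b"Grandpa");         // assumed new pallet name
    // Distinct prefixes are exactly why the early return above fires when
    // the new name equals the old prefix: there would be nothing to move.
    assert_ne!(old, new);
}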
{ pub token: syn::token::$tok, @@ -46,7 +49,7 @@ macro_rules! groups_impl { fn parse(input: ParseStream) -> Result { let syn::group::$name { token, content } = syn::group::$parse(input)?; let content = content.parse()?; - Ok($name { token, content, }) + Ok($name { token, content }) } } @@ -60,12 +63,12 @@ macro_rules! groups_impl { } } - impl Clone for $name
{ + impl Clone for $name
{ fn clone(&self) -> Self { Self { token: self.token.clone(), content: self.content.clone() } } } - } + }; } groups_impl!(Braces, Brace, Brace, parse_braces); @@ -73,23 +76,22 @@ groups_impl!(Brackets, Bracket, Bracket, parse_brackets); groups_impl!(Parens, Paren, Parenthesis, parse_parens); #[derive(Debug)] -pub struct PunctuatedInner { - pub inner: syn::punctuated::Punctuated, +pub struct PunctuatedInner { + pub inner: syn::punctuated::Punctuated, pub variant: V, } #[derive(Debug, Clone)] pub struct NoTrailing; - #[derive(Debug, Clone)] pub struct Trailing; -pub type Punctuated = PunctuatedInner; +pub type Punctuated = PunctuatedInner; -pub type PunctuatedTrailing = PunctuatedInner; +pub type PunctuatedTrailing = PunctuatedInner; -impl Parse for PunctuatedInner { +impl Parse for PunctuatedInner { fn parse(input: ParseStream) -> Result { Ok(PunctuatedInner { inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, @@ -98,7 +100,7 @@ impl Parse for PunctuatedInner Parse for PunctuatedInner { +impl Parse for PunctuatedInner { fn parse(input: ParseStream) -> Result { Ok(PunctuatedInner { inner: syn::punctuated::Punctuated::parse_terminated(input)?, @@ -107,13 +109,13 @@ impl Parse for PunctuatedInner { } } -impl ToTokens for PunctuatedInner { +impl ToTokens for PunctuatedInner { fn to_tokens(&self, tokens: &mut TokenStream) { self.inner.to_tokens(tokens) } } -impl Clone for PunctuatedInner { +impl Clone for PunctuatedInner { fn clone(&self) -> Self { Self { inner: self.inner.clone(), variant: self.variant.clone() } } @@ -127,9 +129,7 @@ pub struct Meta { impl Parse for Meta { fn parse(input: ParseStream) -> Result { - Ok(Meta { - inner: syn::Meta::parse(input)?, - }) + Ok(Meta { inner: syn::Meta::parse(input)? }) } } @@ -151,9 +151,7 @@ pub struct OuterAttributes { impl Parse for OuterAttributes { fn parse(input: ParseStream) -> Result { let inner = syn::Attribute::parse_outer(input)?; - Ok(OuterAttributes { - inner, - }) + Ok(OuterAttributes { inner }) } } @@ -189,13 +187,11 @@ struct ContainsIdent<'a> { impl<'ast> ContainsIdent<'ast> { fn visit_tokenstream(&mut self, stream: TokenStream) { - stream.into_iter().for_each(|tt| - match tt { - TokenTree::Ident(id) => self.visit_ident(&id), - TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), - _ => {} - } - ) + stream.into_iter().for_each(|tt| match tt { + TokenTree::Ident(id) => self.visit_ident(&id), + TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), + _ => {}, + }) } fn visit_ident(&mut self, ident: &Ident) { @@ -218,10 +214,7 @@ impl<'ast> Visit<'ast> for ContainsIdent<'ast> { /// Check if a `Type` contains the given `Ident`. pub fn type_contains_ident(typ: &syn::Type, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { result: false, ident }; visit::visit_type(&mut visit, typ); visit.result @@ -229,10 +222,7 @@ pub fn type_contains_ident(typ: &syn::Type, ident: &Ident) -> bool { /// Check if a `Expr` contains the given `Ident`. 
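The `ContainsIdent` visitor reformatted above checks both syn trees and raw token streams for an identifier. A self-contained sketch of the tree half, assuming syn 1.x with its `visit` feature enabled; unlike the real helper it does not recurse into macro token streams:

use proc_macro2::Ident;
use syn::visit::{self, Visit};

struct ContainsIdentSketch<'a> {
    ident: &'a Ident,
    result: bool,
}

impl<'a, 'ast> Visit<'ast> for ContainsIdentSketch<'a> {
    // Called for every identifier the traversal reaches; record a match.
    fn visit_ident(&mut self, ident: &'ast Ident) {
        if ident == self.ident {
            self.result = true;
        }
    }
}

// Mirror of `type_contains_ident` above: walk the type, report the flag.
fn type_contains_ident_sketch(typ: &syn::Type, ident: &Ident) -> bool {
    let mut visitor = ContainsIdentSketch { ident, result: false };
    visit::visit_type(&mut visitor, typ);
    visitor.result
}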
pub fn expr_contains_ident(expr: &syn::Expr, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { result: false, ident }; visit::visit_expr(&mut visit, expr); visit.result diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 12c1161a6a6c19f4c71e56323890b9bb84935ba7..d962f6e00d70512cb5c707193105268a5bc1766b 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -18,20 +18,26 @@ //! Dispatch system. Contains a macro for defining runtime modules and //! generating values representing lazy module function calls. -pub use crate::sp_std::{result, fmt, prelude::{Vec, Clone, Eq, PartialEq}, marker}; -pub use crate::codec::{Codec, EncodeLike, Decode, Encode, Input, Output, HasCompact, EncodeAsRef}; -pub use frame_metadata::{ - FunctionMetadata, DecodeDifferent, DecodeDifferentArray, FunctionArgumentMetadata, - ModuleConstantMetadata, DefaultByte, DefaultByteGetter, ModuleErrorMetadata, ErrorMetadata +pub use crate::{ + codec::{Codec, Decode, Encode, EncodeAsRef, EncodeLike, HasCompact, Input, Output}, + sp_std::{ + fmt, marker, + prelude::{Clone, Eq, PartialEq, Vec}, + result, + }, + traits::{ + CallMetadata, GetCallMetadata, GetCallName, GetPalletVersion, UnfilteredDispatchable, + }, + weights::{ + ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, + TransactionPriority, WeighData, Weight, WithPostDispatchInfo, + }, }; -pub use crate::weights::{ - GetDispatchInfo, DispatchInfo, WeighData, ClassifyDispatch, TransactionPriority, Weight, - PaysFee, PostDispatchInfo, WithPostDispatchInfo, +pub use frame_metadata::{ + DecodeDifferent, DecodeDifferentArray, DefaultByte, DefaultByteGetter, ErrorMetadata, + FunctionArgumentMetadata, FunctionMetadata, ModuleConstantMetadata, ModuleErrorMetadata, }; pub use sp_runtime::{traits::Dispatchable, DispatchError}; -pub use crate::traits::{ - CallMetadata, GetCallMetadata, GetCallName, UnfilteredDispatchable, GetPalletVersion, -}; /// The return typ of a `Dispatchable` in frame. When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` @@ -2331,7 +2337,6 @@ macro_rules! __call_to_functions { }; } - /// Convert a list of functions into a list of `FunctionMetadata` items. #[macro_export] #[doc(hidden)] @@ -2465,13 +2470,19 @@ macro_rules! __check_reserved_fn_name { #[allow(dead_code)] mod tests { use super::*; - use crate::weights::{DispatchInfo, DispatchClass, Pays, RuntimeDbWeight}; - use crate::traits::{ - GetCallName, OnInitialize, OnFinalize, OnIdle, OnRuntimeUpgrade, - IntegrityTest, Get, PalletInfo, + use crate::{ + traits::{ + Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, + PalletInfo, + }, + weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; - pub trait Config: system::Config + Sized where Self::AccountId: From { } + pub trait Config: system::Config + Sized + where + Self::AccountId: From, + { + } pub mod system { use super::*; @@ -2546,18 +2557,14 @@ mod tests { FunctionMetadata { name: DecodeDifferent::Encode("aux_0"), arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ - " Hi, this is a comment." 
- ]) + documentation: DecodeDifferent::Encode(&[" Hi, this is a comment."]), }, FunctionMetadata { name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("Compact") - } - ]), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("Compact"), + }]), documentation: DecodeDifferent::Encode(&[]), }, FunctionMetadata { @@ -2570,7 +2577,7 @@ mod tests { FunctionArgumentMetadata { name: DecodeDifferent::Encode("_data2"), ty: DecodeDifferent::Encode("String"), - } + }, ]), documentation: DecodeDifferent::Encode(&[]), }, @@ -2581,12 +2588,10 @@ mod tests { }, FunctionMetadata { name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - } - ]), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }]), documentation: DecodeDifferent::Encode(&[]), }, FunctionMetadata { @@ -2598,8 +2603,8 @@ mod tests { }, FunctionArgumentMetadata { name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("Compact") - } + ty: DecodeDifferent::Encode("Compact"), + }, ]), documentation: DecodeDifferent::Encode(&[]), }, @@ -2611,7 +2616,7 @@ mod tests { ]; pub struct TraitImpl {} - impl Config for TraitImpl { } + impl Config for TraitImpl {} type Test = Module; @@ -2679,7 +2684,6 @@ mod tests { } } - impl system::Config for TraitImpl { type Origin = OuterOrigin; type AccountId = u32; @@ -2760,9 +2764,9 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { - sp_io::TestExternalities::default().execute_with(|| + sp_io::TestExternalities::default().execute_with(|| { assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10) - ); + }); } #[test] @@ -2788,7 +2792,10 @@ mod tests { #[test] fn get_call_names() { let call_names = Call::::get_call_names(); - assert_eq!(["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], call_names); + assert_eq!( + ["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], + call_names + ); } #[test] diff --git a/substrate/frame/support/src/error.rs b/substrate/frame/support/src/error.rs index 508de49e949c2a3bffb7ebd131efa467225ba45f..f0c6ba0f3b1c79c27d7527bbb43fd7ee850f9640 100644 --- a/substrate/frame/support/src/error.rs +++ b/substrate/frame/support/src/error.rs @@ -18,9 +18,9 @@ //! Macro for declaring a module error. #[doc(hidden)] -pub use sp_runtime::traits::{LookupError, BadOrigin}; +pub use frame_metadata::{DecodeDifferent, ErrorMetadata, ModuleErrorMetadata}; #[doc(hidden)] -pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; +pub use sp_runtime::traits::{BadOrigin, LookupError}; /// Declare an error type for a runtime module. /// diff --git a/substrate/frame/support/src/event.rs b/substrate/frame/support/src/event.rs index a1e5609e67ef49fbffe4282c02b5394103919913..6e0d4ba6b47bd0c58b27d7308338be24bde8f0e6 100644 --- a/substrate/frame/support/src/event.rs +++ b/substrate/frame/support/src/event.rs @@ -21,7 +21,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnEncode}; +pub use frame_metadata::{DecodeDifferent, EventMetadata, FnEncode, OuterEventMetadata}; /// Implement the `Event` for a module. /// @@ -35,7 +35,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// } /// ); /// -///# fn main() {} +/// # fn main() {} /// ``` /// /// # Generic Event Example: @@ -75,7 +75,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// ); /// } /// -///# fn main() {} +/// # fn main() {} /// ``` /// /// The syntax for generic events requires the `where`. @@ -83,9 +83,9 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// # Generic Event with Instance Example: /// /// ```rust -///# struct DefaultInstance; -///# trait Instance {} -///# impl Instance for DefaultInstance {} +/// # struct DefaultInstance; +/// # trait Instance {} +/// # impl Instance for DefaultInstance {} /// trait Config { /// type Balance; /// type Token; @@ -100,7 +100,7 @@ pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnE /// Message(Balance, Token), /// } /// ); -///# fn main() {} +/// # fn main() {} /// ``` #[macro_export] macro_rules! decl_event { @@ -337,8 +337,8 @@ macro_rules! __events_to_metadata { #[allow(dead_code)] mod tests { use super::*; + use codec::{Decode, Encode}; use serde::Serialize; - use codec::{Encode, Decode}; mod system { pub trait Config: 'static { @@ -414,9 +414,10 @@ mod tests { decl_event!( /// Event with renamed generic parameter - pub enum Event where + pub enum Event + where BalanceRenamed = ::Balance, - OriginRenamed = ::Origin + OriginRenamed = ::Origin, { TestEvent(BalanceRenamed), TestOrigin(OriginRenamed), @@ -467,15 +468,13 @@ mod tests { decl_event!( /// Event finish formatting on an named one with trailing comma - pub enum Event where + pub enum Event + where BalanceRenamed = ::Balance, OriginRenamed = ::Origin, { TestEvent(BalanceRenamed, OriginRenamed), - TrailingCommaInArgs( - u32, - u32, - ), + TrailingCommaInArgs(u32, u32), } ); } @@ -505,26 +504,24 @@ mod tests { fn event_metadata() { assert_eq!( system_renamed::Event::metadata(), - &[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ] + &[EventMetadata { + name: DecodeDifferent::Encode("SystemEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + },] ); assert_eq!( event_module::Event::::metadata(), &[ EventMetadata { name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "Balance", "Origin" ]), - documentation: DecodeDifferent::Encode(&[ " Hi, I am a comment." 
]) + arguments: DecodeDifferent::Encode(&["Balance", "Origin"]), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]) }, EventMetadata { name: DecodeDifferent::Encode("EventWithoutParams"), arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ " Dog" ]), + documentation: DecodeDifferent::Encode(&[" Dog"]), }, ] ); @@ -533,25 +530,23 @@ mod tests { &[ EventMetadata { name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "BalanceRenamed" ]), + arguments: DecodeDifferent::Encode(&["BalanceRenamed"]), documentation: DecodeDifferent::Encode(&[]) }, EventMetadata { name: DecodeDifferent::Encode("TestOrigin"), - arguments: DecodeDifferent::Encode(&[ "OriginRenamed" ]), + arguments: DecodeDifferent::Encode(&["OriginRenamed"]), documentation: DecodeDifferent::Encode(&[]), }, ] ); assert_eq!( event_module3::Event::metadata(), - &[ - EventMetadata { - name: DecodeDifferent::Encode("HiEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ], + &[EventMetadata { + name: DecodeDifferent::Encode("HiEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]) + }], ); } } diff --git a/substrate/frame/support/src/hash.rs b/substrate/frame/support/src/hash.rs index 1425760051d255e295150e5c74ed6eedbba62858..4136bd518f4c2d2eb677e71a682c7bdf3b303985 100644 --- a/substrate/frame/support/src/hash.rs +++ b/substrate/frame/support/src/hash.rs @@ -18,8 +18,8 @@ //! Hash utilities. use codec::{Codec, MaxEncodedLen}; +use sp_io::hashing::{blake2_128, blake2_256, twox_128, twox_256, twox_64}; use sp_std::prelude::Vec; -use sp_io::hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256}; // This trait must be kept coherent with frame-support-procedural HasherKind usage pub trait Hashable: Sized { @@ -51,7 +51,9 @@ impl Hashable for T { fn twox_64_concat(&self) -> Vec { self.using_encoded(Twox64Concat::hash) } - fn identity(&self) -> Vec { self.encode() } + fn identity(&self) -> Vec { + self.encode() + } } /// Hasher to use to hash keys to insert to storage. @@ -98,11 +100,7 @@ impl StorageHasher for Twox64Concat { const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Twox64Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { - twox_64(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::>() + twox_64(x).iter().chain(x.into_iter()).cloned().collect::>() } fn max_len() -> usize { K::max_encoded_len().saturating_add(8) @@ -124,11 +122,7 @@ impl StorageHasher for Blake2_128Concat { const METADATA: frame_metadata::StorageHasher = frame_metadata::StorageHasher::Blake2_128Concat; type Output = Vec; fn hash(x: &[u8]) -> Vec { - blake2_128(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::>() + blake2_128(x).iter().chain(x.into_iter()).cloned().collect::>() } fn max_len() -> usize { K::max_encoded_len().saturating_add(16) diff --git a/substrate/frame/support/src/inherent.rs b/substrate/frame/support/src/inherent.rs index cccbbbaa517ceb69953d1ba9c58b6c5dc1d12531..2125f3e7f50a72ede72b308497a504754e944a12 100644 --- a/substrate/frame/support/src/inherent.rs +++ b/substrate/frame/support/src/inherent.rs @@ -15,13 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
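The `Twox64Concat` and `Blake2_128Concat` hashers condensed in the `hash.rs` hunk above share one shape: a fixed-size digest followed by the original encoded key, which is what makes them usable with `ReversibleStorageHasher` for key iteration. A sketch of the `Twox64Concat` case, reaching the hash through `sp_core` (an assumption; the pallet code above goes via `sp_io`):

use sp_core::hashing::twox_64;

// digest ++ original bytes, as in the one-line body after the reformat.
fn twox_64_concat(x: &[u8]) -> Vec<u8> {
    twox_64(x).iter().chain(x.iter()).cloned().collect()
}

fn main() {
    let key = 42u32.to_le_bytes();
    let stored = twox_64_concat(&key);
    assert_eq!(stored.len(), 8 + key.len());
    // The suffix is the untouched key, so reversing the hasher is just a
    // matter of skipping the 8 digest bytes.
    assert_eq!(&stored[8..], &key[..]);
}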
-#[doc(hidden)] -pub use crate::sp_std::vec::Vec; #[doc(hidden)] pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; +#[doc(hidden)] +pub use crate::sp_std::vec::Vec; pub use sp_inherents::{ - InherentData, CheckInherentsResult, IsFatalError, InherentIdentifier, MakeFatalError, + CheckInherentsResult, InherentData, InherentIdentifier, IsFatalError, MakeFatalError, }; /// A pallet that provides or verifies an inherent extrinsic. @@ -53,7 +53,9 @@ pub trait ProvideInherent { /// one inherent for which: /// * type is [`Self::Call`], /// * [`Self::is_inherent`] returns true. - fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } + fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { + Ok(None) + } /// Check whether the given inherent is valid. Checking the inherent is optional and can be /// omitted by using the default implementation. diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 34836dd5518e6e137c500f6f533263cff051f5b8..0cdaadbdae3a2441fa4fea73cfe34325e5089970 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -25,42 +25,42 @@ extern crate self as frame_support; #[doc(hidden)] pub use sp_tracing; -#[cfg(feature = "std")] -pub use serde; -pub use sp_core::Void; -#[doc(hidden)] -pub use sp_std; #[doc(hidden)] pub use codec; +#[doc(hidden)] +pub use frame_metadata as metadata; +#[doc(hidden)] +pub use log; #[cfg(feature = "std")] #[doc(hidden)] pub use once_cell; #[doc(hidden)] pub use paste; #[cfg(feature = "std")] +pub use serde; +pub use sp_core::Void; #[doc(hidden)] -pub use sp_state_machine::BasicExternalities; -#[doc(hidden)] -pub use sp_io::{storage::root as storage_root, self}; +pub use sp_io::{self, storage::root as storage_root}; #[doc(hidden)] pub use sp_runtime::RuntimeDebug; +#[cfg(feature = "std")] #[doc(hidden)] -pub use log; +pub use sp_state_machine::BasicExternalities; #[doc(hidden)] -pub use frame_metadata as metadata; +pub use sp_std; #[macro_use] pub mod dispatch; -pub mod storage; mod hash; +pub mod storage; #[macro_use] pub mod event; pub mod inherent; #[macro_use] pub mod error; +pub mod instances; pub mod traits; pub mod weights; -pub mod instances; #[doc(hidden)] pub mod unsigned { @@ -68,23 +68,27 @@ pub mod unsigned { pub use crate::sp_runtime::traits::ValidateUnsigned; #[doc(hidden)] pub use crate::sp_runtime::transaction_validity::{ - TransactionValidity, UnknownTransaction, TransactionValidityError, TransactionSource, + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, }; } -pub use self::hash::{ - Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, Hashable, - StorageHasher, ReversibleStorageHasher -}; -pub use self::storage::{ - StorageValue, StorageMap, StorageDoubleMap, StorageNMap, StoragePrefixedMap, - IterableStorageMap, IterableStorageDoubleMap, IterableStorageNMap, migration, - bounded_vec::{BoundedVec, BoundedSlice}, weak_bounded_vec::WeakBoundedVec, +pub use self::{ + dispatch::{Callable, Parameter}, + hash::{ + Blake2_128, Blake2_128Concat, Blake2_256, Hashable, Identity, ReversibleStorageHasher, + StorageHasher, Twox128, Twox256, Twox64Concat, + }, + storage::{ + bounded_vec::{BoundedSlice, BoundedVec}, + migration, + weak_bounded_vec::WeakBoundedVec, + IterableStorageDoubleMap, IterableStorageMap, IterableStorageNMap, StorageDoubleMap, + StorageMap, StorageNMap, StoragePrefixedMap, StorageValue, + }, }; -pub use self::dispatch::{Parameter, 
Callable}; -pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; +pub use sp_runtime::{self, print, traits::Printable, ConsensusEngineId}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_runtime::TypeId; /// A unified log target for support operations. @@ -108,14 +112,14 @@ impl TypeId for PalletId { /// /// Useful for creating a *storage-like* struct for test and migrations. /// -///``` +/// ``` /// # use frame_support::generate_storage_alias; /// use frame_support::codec; /// use frame_support::Twox64Concat; /// // generate a storage value with type u32. /// generate_storage_alias!(Prefix, StorageName => Value); /// -/// // generate a double map from `(u32, u32)` (with hashers `Twox64Concat` for each key) +/// // generate a double map from `(u32, u32)` (with hashers `Twox64Concat` for each key) /// // to `Vec` /// generate_storage_alias!( /// OtherPrefix, OtherStorageName => DoubleMap< @@ -534,7 +538,7 @@ pub fn debug(data: &impl sp_std::fmt::Debug) { #[doc(inline)] pub use frame_support_procedural::{ - decl_storage, construct_runtime, transactional, RuntimeDebugNoBound, + construct_runtime, decl_storage, transactional, RuntimeDebugNoBound, }; #[doc(hidden)] @@ -546,14 +550,14 @@ pub use frame_support_procedural::__generate_dummy_part_checker; /// ``` /// # use frame_support::CloneNoBound; /// trait Config { -/// type C: Clone; +/// type C: Clone; /// } /// /// // Foo implements [`Clone`] because `C` bounds [`Clone`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Clone`]. /// #[derive(CloneNoBound)] /// struct Foo { -/// c: T::C, +/// c: T::C, /// } /// ``` pub use frame_support_procedural::CloneNoBound; @@ -564,14 +568,14 @@ pub use frame_support_procedural::CloneNoBound; /// ``` /// # use frame_support::{EqNoBound, PartialEqNoBound}; /// trait Config { -/// type C: Eq; +/// type C: Eq; /// } /// /// // Foo implements [`Eq`] because `C` bounds [`Eq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Eq`]. /// #[derive(PartialEqNoBound, EqNoBound)] /// struct Foo { -/// c: T::C, +/// c: T::C, /// } /// ``` pub use frame_support_procedural::EqNoBound; @@ -582,14 +586,14 @@ pub use frame_support_procedural::EqNoBound; /// ``` /// # use frame_support::PartialEqNoBound; /// trait Config { -/// type C: PartialEq; +/// type C: PartialEq; /// } /// /// // Foo implements [`PartialEq`] because `C` bounds [`PartialEq`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`PartialEq`]. /// #[derive(PartialEqNoBound)] /// struct Foo { -/// c: T::C, +/// c: T::C, /// } /// ``` pub use frame_support_procedural::PartialEqNoBound; @@ -601,14 +605,14 @@ pub use frame_support_procedural::PartialEqNoBound; /// # use frame_support::DebugNoBound; /// # use core::fmt::Debug; /// trait Config { -/// type C: Debug; +/// type C: Debug; /// } /// /// // Foo implements [`Debug`] because `C` bounds [`Debug`]. /// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Debug`]. /// #[derive(DebugNoBound)] /// struct Foo { -/// c: T::C, +/// c: T::C, /// } /// ``` pub use frame_support_procedural::DebugNoBound; @@ -620,14 +624,14 @@ pub use frame_support_procedural::DebugNoBound; /// # use frame_support::DefaultNoBound; /// # use core::default::Default; /// trait Config { -/// type C: Default; +/// type C: Default; /// } /// /// // Foo implements [`Default`] because `C` bounds [`Default`]. 
/// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Default`]. /// #[derive(DefaultNoBound)] /// struct Foo { -/// c: T::C, +/// c: T::C, /// } /// ``` pub use frame_support_procedural::DefaultNoBound; @@ -684,8 +688,8 @@ pub use frame_support_procedural::crate_to_pallet_version; #[macro_export] macro_rules! fail { ( $y:expr ) => {{ - return Err($y.into()); - }} + return Err($y.into()) + }}; } /// Evaluate `$x:expr` and if not true return `Err($y:expr)`. @@ -697,7 +701,7 @@ macro_rules! ensure { if !$x { $crate::fail!($y); } - }} + }}; } /// Evaluate an expression, assert it returns an expected `Err` value and that @@ -713,7 +717,7 @@ macro_rules! assert_noop { let h = $crate::storage_root(); $crate::assert_err!($x, $y); assert_eq!(h, $crate::storage_root()); - } + }; } /// Evaluate any expression and assert that runtime storage has not been mutated @@ -728,7 +732,7 @@ macro_rules! assert_storage_noop { let h = $crate::storage_root(); $x; assert_eq!(h, $crate::storage_root()); - } + }; } /// Assert an expression returns an error specified. @@ -738,7 +742,7 @@ macro_rules! assert_storage_noop { macro_rules! assert_err { ( $x:expr , $y:expr $(,)? ) => { assert_eq!($x, Err($y.into())); - } + }; } /// Assert an expression returns an error specified. @@ -749,7 +753,7 @@ macro_rules! assert_err { macro_rules! assert_err_ignore_postinfo { ( $x:expr , $y:expr $(,)? ) => { $crate::assert_err!($x.map(|_| ()).map_err(|e| e.error), $y); - } + }; } /// Assert an expression returns error with the given weight. @@ -762,7 +766,7 @@ macro_rules! assert_err_with_weight { } else { panic!("expected Err(_), got Ok(_).") } - } + }; } /// Panic if an expression doesn't evaluate to `Ok`. @@ -780,23 +784,23 @@ macro_rules! assert_ok { }; ( $x:expr, $y:expr $(,)? ) => { assert_eq!($x, Ok($y)); - } + }; } #[cfg(feature = "std")] #[doc(hidden)] -pub use serde::{Serialize, Deserialize}; +pub use serde::{Deserialize, Serialize}; #[cfg(test)] pub mod tests { use super::*; use codec::{Codec, EncodeLike}; use frame_metadata::{ - DecodeDifferent, StorageEntryMetadata, StorageMetadata, StorageEntryType, - StorageEntryModifier, DefaultByteGetter, StorageHasher, + DecodeDifferent, DefaultByteGetter, StorageEntryMetadata, StorageEntryModifier, + StorageEntryType, StorageHasher, StorageMetadata, }; - use sp_std::{marker::PhantomData, result}; use sp_io::TestExternalities; + use sp_std::{marker::PhantomData, result}; /// A PalletInfo implementation which just panics. 
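The `CloneNoBound`/`EqNoBound`/`DebugNoBound`/`DefaultNoBound` docs above all hinge on the same point: a plain derive bounds the type parameter itself rather than the associated type actually stored. A sketch of the problem and of the impl these derives effectively generate; names here are illustrative:

trait Config {
    type C: Clone;
}

struct Foo<T: Config> {
    c: T::C,
}

// `#[derive(Clone)]` would expand to `impl<T: Clone + Config> Clone for
// Foo<T>`, demanding `T: Clone` even though only `T::C` is held. Writing
// the impl by hand, as CloneNoBound does, adds no extra bound:
impl<T: Config> Clone for Foo<T> {
    fn clone(&self) -> Self {
        Foo { c: self.c.clone() }
    }
}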
pub struct PanicPalletInfo; @@ -864,7 +868,9 @@ pub mod tests { type Map = Data; - trait Sorted { fn sorted(self) -> Self; } + trait Sorted { + fn sorted(self) -> Self; + } impl Sorted for Vec { fn sorted(mut self) -> Self { self.sort(); @@ -918,13 +924,15 @@ pub mod tests { DataDM::insert(1, 0, 2); DataDM::insert(1, 1, 3); - let get_all = || vec![ - DataDM::get(0, 1), - DataDM::get(1, 0), - DataDM::get(1, 1), - DataDM::get(2, 0), - DataDM::get(2, 1), - ]; + let get_all = || { + vec![ + DataDM::get(0, 1), + DataDM::get(1, 0), + DataDM::get(1, 1), + DataDM::get(2, 0), + DataDM::get(2, 1), + ] + }; assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); // Two existing @@ -990,15 +998,24 @@ pub mod tests { Map::mutate(&key, |val| { *val = 15; }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 15)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 15)] + ); Map::mutate(&key, |val| { *val = 17; }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 17)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 17)] + ); // remove first Map::remove(&key); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43)]); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43)] + ); // remove last from the list Map::remove(&(key - 2)); @@ -1049,7 +1066,6 @@ pub mod tests { assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); - }); } @@ -1100,10 +1116,13 @@ pub mod tests { assert_eq!(DoubleMap::get(&key1, key2), 1); // no-op if `Err` - assert_noop!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { - *v = Some(2); - Err("nah") - }), "nah"); + assert_noop!( + DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { + *v = Some(2); + Err("nah") + }), + "nah" + ); // removed if mutated to`None` assert_ok!(DoubleMap::try_mutate_exists(key1, key2, |v| -> TestResult { @@ -1116,126 +1135,124 @@ pub mod tests { const EXPECTED_METADATA: StorageMetadata = StorageMetadata { prefix: DecodeDifferent::Encode("Test"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("Data"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Twox64Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + entries: DecodeDifferent::Encode(&[ + StorageEntryMetadata { + name: DecodeDifferent::Encode("Data"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Twox64Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + unused: false, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("OptionLinkedMap"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u32"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructOptionLinkedMap(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: 
DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("OptionLinkedMap"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u32"), + unused: false, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Identity, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructOptionLinkedMap(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Identity, + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + unused: false, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData2"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + unused: false, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DataDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Twox64Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DataDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Twox64Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + key2_hasher: StorageHasher::Blake2_128Concat, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericDataDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: 
DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Identity, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructDataDM( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericDataDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: StorageHasher::Identity, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2DM"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Twox64Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericDataDM( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData2DM"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: StorageHasher::Twox64Concat, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("AppendableDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("Vec"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGenericData2DM(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("AppendableDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("Vec"), + key2_hasher: StorageHasher::Blake2_128Concat, }, - ] - ), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGenericData2DM(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), }; #[test] @@ -1269,35 +1286,38 @@ pub mod tests { /// Prelude to be used alongside pallet macro, for ease of use. 
pub mod pallet_prelude { - pub use sp_std::marker::PhantomData; #[cfg(feature = "std")] pub use crate::traits::GenesisBuild; pub use crate::{ - EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DebugNoBound, CloneNoBound, Twox256, - Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, ensure, - RuntimeDebug, storage, + dispatch::{DispatchError, DispatchResult, DispatchResultWithPostInfo, Parameter}, + ensure, + inherent::{InherentData, InherentIdentifier, ProvideInherent}, + storage, + storage::{ + bounded_vec::BoundedVec, + types::{ + Key as NMapKey, OptionQuery, StorageDoubleMap, StorageMap, StorageNMap, + StorageValue, ValueQuery, + }, + }, traits::{ - Get, Hooks, IsType, GetPalletVersion, EnsureOrigin, PalletInfoAccess, StorageInfoTrait, - ConstU32, GetDefault, + ConstU32, EnsureOrigin, Get, GetDefault, GetPalletVersion, Hooks, IsType, + PalletInfoAccess, StorageInfoTrait, }, - dispatch::{DispatchResultWithPostInfo, Parameter, DispatchError, DispatchResult}, weights::{DispatchClass, Pays, Weight}, - storage::types::{ - Key as NMapKey, StorageDoubleMap, StorageMap, StorageNMap, StorageValue, ValueQuery, - OptionQuery, - }, - storage::bounded_vec::BoundedVec, + Blake2_128, Blake2_128Concat, Blake2_256, CloneNoBound, DebugNoBound, EqNoBound, Identity, + PartialEqNoBound, RuntimeDebug, RuntimeDebugNoBound, Twox128, Twox256, Twox64Concat, }; - pub use codec::{Encode, Decode, MaxEncodedLen}; - pub use crate::inherent::{InherentData, InherentIdentifier, ProvideInherent}; + pub use codec::{Decode, Encode, MaxEncodedLen}; pub use sp_runtime::{ traits::{MaybeSerializeDeserialize, Member, ValidateUnsigned}, transaction_validity::{ - TransactionSource, TransactionValidity, ValidTransaction, TransactionPriority, - TransactionTag, TransactionLongevity, TransactionValidityError, InvalidTransaction, - UnknownTransaction, + InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, + TransactionTag, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, }, }; + pub use sp_std::marker::PhantomData; } /// `pallet` attribute macro allows to define a pallet to be used in `construct_runtime!`. @@ -1321,9 +1341,9 @@ pub mod pallet_prelude { /// ```ignore /// #[pallet] /// pub mod pallet { -/// use frame_support::pallet_prelude::*; -/// use frame_system::pallet_prelude::*; -/// ... +/// use frame_support::pallet_prelude::*; +/// use frame_system::pallet_prelude::*; +/// ... /// } /// ``` /// @@ -1350,8 +1370,8 @@ pub mod pallet_prelude { /// ```ignore /// #[pallet::config] /// pub trait Config: frame_system::Config { -/// #[pallet::constant] -/// type Foo: Get; +/// #[pallet::constant] +/// type Foo: Get; /// } /// ``` /// @@ -2378,5 +2398,4 @@ pub mod pallet_prelude { /// } /// ``` /// * use the newest nightly possible. -/// pub use frame_support_procedural::pallet; diff --git a/substrate/frame/support/src/storage/bounded_btree_map.rs b/substrate/frame/support/src/storage/bounded_btree_map.rs index 7b3efbfbeee5f6b6cb116c9711b6f412140c59be..f8ea35ae584d78c6db0ed21a98e25bbea13369c1 100644 --- a/substrate/frame/support/src/storage/bounded_btree_map.rs +++ b/substrate/frame/support/src/storage/bounded_btree_map.rs @@ -17,15 +17,12 @@ //! Traits, types and structs to support a bounded BTreeMap. 
+use crate::{storage::StorageDecodeLength, traits::Get}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, fmt, marker::PhantomData, ops::Deref, }; -use crate::{ - storage::StorageDecodeLength, - traits::Get, -}; -use codec::{Encode, Decode, MaxEncodedLen}; /// A bounded map based on a B-Tree. /// @@ -46,7 +43,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeMap::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeMap exceeds its limit".into()); + return Err("BoundedBTreeMap exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } @@ -280,7 +277,9 @@ where type Error = (); fn try_from(value: BTreeMap) -> Result { - (value.len() <= Self::bound()).then(move || BoundedBTreeMap(value, PhantomData)).ok_or(()) + (value.len() <= Self::bound()) + .then(move || BoundedBTreeMap(value, PhantomData)) + .ok_or(()) } } @@ -303,9 +302,9 @@ impl codec::EncodeLike> for BoundedBTreeMap whe #[cfg(test)] pub mod test { use super::*; + use crate::Twox128; use sp_io::TestExternalities; use sp_std::convert::TryInto; - use crate::Twox128; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/substrate/frame/support/src/storage/bounded_btree_set.rs b/substrate/frame/support/src/storage/bounded_btree_set.rs index 461b1de58ec814abd97b7a27d3ed941d51e4b8d0..182884e655dd2a3a4de8934dc56d157fa86f7a95 100644 --- a/substrate/frame/support/src/storage/bounded_btree_set.rs +++ b/substrate/frame/support/src/storage/bounded_btree_set.rs @@ -17,15 +17,12 @@ //! Traits, types and structs to support a bounded `BTreeSet`. +use crate::{storage::StorageDecodeLength, traits::Get}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, fmt, marker::PhantomData, ops::Deref, }; -use crate::{ - storage::StorageDecodeLength, - traits::Get, -}; -use codec::{Encode, Decode, MaxEncodedLen}; /// A bounded set based on a B-Tree. /// @@ -45,7 +42,7 @@ where fn decode(input: &mut I) -> Result { let inner = BTreeSet::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedBTreeSet exceeds its limit".into()); + return Err("BoundedBTreeSet exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } @@ -266,7 +263,9 @@ where type Error = (); fn try_from(value: BTreeSet) -> Result { - (value.len() <= Self::bound()).then(move || BoundedBTreeSet(value, PhantomData)).ok_or(()) + (value.len() <= Self::bound()) + .then(move || BoundedBTreeSet(value, PhantomData)) + .ok_or(()) } } @@ -281,16 +280,14 @@ impl codec::DecodeLength for BoundedBTreeSet { impl StorageDecodeLength for BoundedBTreeSet {} -impl codec::EncodeLike> for BoundedBTreeSet where - BTreeSet: Encode -{} +impl codec::EncodeLike> for BoundedBTreeSet where BTreeSet: Encode {} #[cfg(test)] pub mod test { use super::*; + use crate::Twox128; use sp_io::TestExternalities; use sp_std::convert::TryInto; - use crate::Twox128; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/substrate/frame/support/src/storage/bounded_vec.rs b/substrate/frame/support/src/storage/bounded_vec.rs index b5b5252f9ec41f1459b4d6b4d775efaaeba92402..6d25e058c0f4c04e8c5ad64a9b367b6a1fec9749 100644 --- a/substrate/frame/support/src/storage/bounded_vec.rs +++ b/substrate/frame/support/src/storage/bounded_vec.rs @@ -18,17 +18,16 @@ //! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map //! or a double map. 
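`BoundedBTreeMap` and `BoundedBTreeSet` above (and `BoundedVec` below) share one decode-time guard: decode the unbounded inner collection first, then reject it if it exceeds the bound, so oversized data can never round-trip into a bounded type. A stand-alone sketch with a const generic standing in for the `S: Get<u32>` parameter used above:

use codec::{Decode, Error, Input};

pub struct BoundedBytes<const S: usize>(Vec<u8>);

impl<const S: usize> Decode for BoundedBytes<S> {
    fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
        let inner = Vec::<u8>::decode(input)?;
        // Same check as in the hunks above, including the expression-style
        // `return` without a trailing semicolon that this diff settles on.
        if inner.len() > S {
            return Err("BoundedBytes exceeds its limit".into())
        }
        Ok(Self(inner))
    }
}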
-use sp_std::prelude::*; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData}; -use codec::{Encode, Decode, EncodeLike, MaxEncodedLen}; +use crate::{ + storage::{StorageDecodeLength, StorageTryAppend}, + traits::Get, +}; +use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use crate::{ - traits::Get, - storage::{StorageDecodeLength, StorageTryAppend}, -}; +use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; /// A bounded vector. /// @@ -71,7 +70,7 @@ impl> Decode for BoundedVec { fn decode(input: &mut I) -> Result { let inner = Vec::::decode(input)?; if inner.len() > S::get() as usize { - return Err("BoundedVec exceeds its limit".into()); + return Err("BoundedVec exceeds its limit".into()) } Ok(Self(inner, PhantomData)) } @@ -341,9 +340,9 @@ where #[cfg(test)] pub mod test { use super::*; + use crate::Twox128; use sp_io::TestExternalities; use sp_std::convert::TryInto; - use crate::Twox128; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/substrate/frame/support/src/storage/child.rs b/substrate/frame/support/src/storage/child.rs index 52830c8ac5dc8914181dadf97c23fa0452ac6471..4b237aaa561fd90358bb48dcb1f611c7faa2107c 100644 --- a/substrate/frame/support/src/storage/child.rs +++ b/substrate/frame/support/src/storage/child.rs @@ -21,23 +21,17 @@ // NOTE: could replace unhashed by having only one kind of storage (top trie being the child info // of null length parent storage key). +pub use crate::sp_io::KillStorageResult; use crate::sp_std::prelude::*; -use codec::{Codec, Encode, Decode}; +use codec::{Codec, Decode, Encode}; pub use sp_core::storage::{ChildInfo, ChildType}; -pub use crate::sp_io::KillStorageResult; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. -pub fn get( - child_info: &ChildInfo, - key: &[u8], -) -> Option { +pub fn get(child_info: &ChildInfo, key: &[u8]) -> Option { match child_info.child_type() { ChildType::ParentKeyId => { let storage_key = child_info.storage_key(); - sp_io::default_child_storage::get( - storage_key, - key, - ).and_then(|v| { + sp_io::default_child_storage::get(storage_key, key).and_then(|v| { Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. crate::runtime_print!( @@ -54,20 +48,13 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. -pub fn get_or_default( - child_info: &ChildInfo, - key: &[u8], -) -> T { +pub fn get_or_default(child_info: &ChildInfo, key: &[u8]) -> T { get(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. -pub fn get_or( - child_info: &ChildInfo, - key: &[u8], - default_value: T, -) -> T { +pub fn get_or(child_info: &ChildInfo, key: &[u8], default_value: T) -> T { get(child_info, key).unwrap_or(default_value) } @@ -82,27 +69,16 @@ pub fn get_or_else T>( } /// Put `value` in storage under `key`. 
-pub fn put( - child_info: &ChildInfo, - key: &[u8], - value: &T, -) { +pub fn put(child_info: &ChildInfo, key: &[u8], value: &T) { match child_info.child_type() { - ChildType::ParentKeyId => value.using_encoded(|slice| - sp_io::default_child_storage::set( - child_info.storage_key(), - key, - slice, - ) - ), + ChildType::ParentKeyId => value.using_encoded(|slice| { + sp_io::default_child_storage::set(child_info.storage_key(), key, slice) + }), } } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. -pub fn take( - child_info: &ChildInfo, - key: &[u8], -) -> Option { +pub fn take(child_info: &ChildInfo, key: &[u8]) -> Option { let r = get(child_info, key); if r.is_some() { kill(child_info, key); @@ -112,20 +88,13 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. -pub fn take_or_default( - child_info: &ChildInfo, - key: &[u8], -) -> T { +pub fn take_or_default(child_info: &ChildInfo, key: &[u8]) -> T { take(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. -pub fn take_or( - child_info: &ChildInfo, - key: &[u8], - default_value: T, -) -> T { +pub fn take_or(child_info: &ChildInfo, key: &[u8], default_value: T) -> T { take(child_info, key).unwrap_or(default_value) } @@ -140,15 +109,11 @@ pub fn take_or_else T>( } /// Check to see if `key` has an explicit entry in storage. -pub fn exists( - child_info: &ChildInfo, - key: &[u8], -) -> bool { +pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::read( - child_info.storage_key(), - key, &mut [0;0][..], 0, - ).is_some(), + ChildType::ParentKeyId => + sp_io::default_child_storage::read(child_info.storage_key(), key, &mut [0; 0][..], 0) + .is_some(), } } @@ -171,86 +136,50 @@ pub fn exists( /// not make much sense because it is not cumulative when called inside the same block. /// Use this function to distribute the deletion of a single child trie across multiple /// blocks. -pub fn kill_storage( - child_info: &ChildInfo, - limit: Option, -) -> KillStorageResult { +pub fn kill_storage(child_info: &ChildInfo, limit: Option) -> KillStorageResult { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( - child_info.storage_key(), - limit - ), + ChildType::ParentKeyId => + sp_io::default_child_storage::storage_kill(child_info.storage_key(), limit), } } /// Ensure `key` has no explicit entry in storage. -pub fn kill( - child_info: &ChildInfo, - key: &[u8], -) { +pub fn kill(child_info: &ChildInfo, key: &[u8]) { match child_info.child_type() { ChildType::ParentKeyId => { - sp_io::default_child_storage::clear( - child_info.storage_key(), - key, - ); + sp_io::default_child_storage::clear(child_info.storage_key(), key); }, } } /// Get a Vec of bytes from storage. -pub fn get_raw( - child_info: &ChildInfo, - key: &[u8], -) -> Option> { +pub fn get_raw(child_info: &ChildInfo, key: &[u8]) -> Option> { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::get( - child_info.storage_key(), - key, - ), + ChildType::ParentKeyId => sp_io::default_child_storage::get(child_info.storage_key(), key), } } /// Put a raw byte slice into storage. 
-pub fn put_raw( - child_info: &ChildInfo, - key: &[u8], - value: &[u8], -) { +pub fn put_raw(child_info: &ChildInfo, key: &[u8], value: &[u8]) { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::set( - child_info.storage_key(), - key, - value, - ), + ChildType::ParentKeyId => + sp_io::default_child_storage::set(child_info.storage_key(), key, value), } } /// Calculate current child root value. -pub fn root( - child_info: &ChildInfo, -) -> Vec { +pub fn root(child_info: &ChildInfo) -> Vec { match child_info.child_type() { - ChildType::ParentKeyId => sp_io::default_child_storage::root( - child_info.storage_key(), - ), + ChildType::ParentKeyId => sp_io::default_child_storage::root(child_info.storage_key()), } } /// Return the length in bytes of the value without reading it. `None` if it does not exist. -pub fn len( - child_info: &ChildInfo, - key: &[u8], -) -> Option { +pub fn len(child_info: &ChildInfo, key: &[u8]) -> Option { match child_info.child_type() { ChildType::ParentKeyId => { let mut buffer = [0; 0]; - sp_io::default_child_storage::read( - child_info.storage_key(), - key, - &mut buffer, - 0, - ) - } + sp_io::default_child_storage::read(child_info.storage_key(), key, &mut buffer, 0) + }, } } diff --git a/substrate/frame/support/src/storage/generator/double_map.rs b/substrate/frame/support/src/storage/generator/double_map.rs index 71d8ca3c043a66ccfdb85f7f7505af9ef22db141..3a68fe740ab08c76c100e18480325df97269f5e5 100644 --- a/substrate/frame/support/src/storage/generator/double_map.rs +++ b/substrate/frame/support/src/storage/generator/double_map.rs @@ -15,11 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_std::prelude::*; -use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; -use crate::{storage::{self, unhashed, KeyPrefixIterator, StorageAppend, PrefixIterator}, Never}; -use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; +use crate::{ + hash::{ReversibleStorageHasher, StorageHasher, Twox128}, + storage::{self, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + Never, +}; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_std::{borrow::Borrow, prelude::*}; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -63,9 +65,8 @@ pub trait StorageDoubleMap { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); + let mut result = + Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); result.extend_from_slice(&module_prefix_hashed[..]); result.extend_from_slice(&storage_prefix_hashed[..]); @@ -80,7 +81,8 @@ pub trait StorageDoubleMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the first part of the key used in top storage. 
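`prefix_hash`, shown in the double-map hunk above, fixes the 32-byte prefix that every entry of one map shares: `Twox128(module_prefix) ++ Twox128(storage_prefix)`. A sketch of the same computation with illustrative names:

use sp_core::hashing::twox_128;

fn map_prefix(module_prefix: &[u8], storage_prefix: &[u8]) -> Vec<u8> {
	let mut prefix = Vec::with_capacity(32); // two 16-byte Twox128 hashes
	prefix.extend_from_slice(&twox_128(module_prefix));
	prefix.extend_from_slice(&twox_128(storage_prefix));
	prefix
}

// e.g. all entries of a map `Foo` in pallet `Bar` live under
// `map_prefix(b"Bar", b"Foo") ++ <hashed key material>`.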
- fn storage_double_map_final_key1(k1: KArg1) -> Vec where + fn storage_double_map_final_key1(k1: KArg1) -> Vec + where KArg1: EncodeLike, { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); @@ -88,7 +90,7 @@ pub trait StorageDoubleMap { let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), ); final_key.extend_from_slice(&module_prefix_hashed[..]); @@ -99,7 +101,8 @@ pub trait StorageDoubleMap { } /// Generate the full key used in top storage. - fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec where + fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec + where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -109,10 +112,10 @@ pub trait StorageDoubleMap { let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len(), ); final_key.extend_from_slice(&module_prefix_hashed[..]); @@ -124,7 +127,8 @@ pub trait StorageDoubleMap { } } -impl storage::StorageDoubleMap for G where +impl storage::StorageDoubleMap for G +where K1: FullEncode, K2: FullEncode, V: FullCodec, @@ -132,21 +136,24 @@ impl storage::StorageDoubleMap for G where { type Query = G::Query; - fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec where + fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where KArg1: EncodeLike, KArg2: EncodeLike, { Self::storage_double_map_final_key(k1, k2) } - fn contains_key(k1: KArg1, k2: KArg2) -> bool where + fn contains_key(k1: KArg1, k2: KArg2) -> bool + where KArg1: EncodeLike, KArg2: EncodeLike, { unhashed::exists(&Self::storage_double_map_final_key(k1, k2)) } - fn get(k1: KArg1, k2: KArg2) -> Self::Query where + fn get(k1: KArg1, k2: KArg2) -> Self::Query + where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -156,11 +163,13 @@ impl storage::StorageDoubleMap for G where fn try_get(k1: KArg1, k2: KArg2) -> Result where KArg1: EncodeLike, - KArg2: EncodeLike { + KArg2: EncodeLike, + { unhashed::get(&Self::storage_double_map_final_key(k1, k2)).ok_or(()) } - fn take(k1: KArg1, k2: KArg2) -> Self::Query where + fn take(k1: KArg1, k2: KArg2) -> Self::Query + where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -170,16 +179,12 @@ impl storage::StorageDoubleMap for G where G::from_optional_value_to_query(value) } - fn swap( - x_k1: XKArg1, - x_k2: XKArg2, - y_k1: YKArg1, - y_k2: YKArg2 - ) where + fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) + where XKArg1: EncodeLike, XKArg2: EncodeLike, YKArg1: EncodeLike, - YKArg2: EncodeLike + YKArg2: EncodeLike, { let final_x_key = Self::storage_double_map_final_key(x_k1, x_k2); let final_y_key = Self::storage_double_map_final_key(y_k1, y_k2); @@ -197,7 +202,8 @@ impl storage::StorageDoubleMap for G where } } - fn insert(k1: KArg1, k2: KArg2, val: VArg) where + fn insert(k1: KArg1, k2: KArg2, val: VArg) + where KArg1: EncodeLike, KArg2: EncodeLike, VArg: EncodeLike, @@ -205,7 +211,8 @@ impl storage::StorageDoubleMap for G where unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) } - fn remove(k1: KArg1, k2: KArg2) where + fn remove(k1: KArg1, k2: KArg2) + where KArg1: EncodeLike, KArg2: EncodeLike, { 
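`storage_double_map_final_key`, reflowed in the hunks above, appends the two hashed keys to that shared prefix. A sketch with concrete example hashers and illustrative pallet/map names, mirroring the capacity pre-allocation in the hunk:

use codec::Encode;
use frame_support::{Blake2_128Concat, StorageHasher, Twox64Concat};
use sp_core::hashing::twox_128;

fn double_map_final_key(k1: &u16, k2: &u32) -> Vec<u8> {
	let key1_hashed = Blake2_128Concat::hash(&k1.encode());
	let key2_hashed = Twox64Concat::hash(&k2.encode());
	let mut key = Vec::with_capacity(32 + key1_hashed.len() + key2_hashed.len());
	key.extend_from_slice(&twox_128(b"ExamplePallet"));
	key.extend_from_slice(&twox_128(b"ExampleMap"));
	key.extend_from_slice(&key1_hashed);
	key.extend_from_slice(&key2_hashed);
	key
}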
@@ -213,12 +220,15 @@ impl storage::StorageDoubleMap for G where } fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult - where KArg1: EncodeLike { + where + KArg1: EncodeLike, + { unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit) } - fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator where - KArg1: ?Sized + EncodeLike + fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator + where + KArg1: ?Sized + EncodeLike, { let prefix = Self::storage_double_map_final_key1(k1); storage::PrefixIterator { @@ -229,12 +239,14 @@ impl storage::StorageDoubleMap for G where } } - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R where + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where KArg1: EncodeLike, KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> R, { - Self::try_mutate(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate(k1, k2, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } fn mutate_exists(k1: KArg1, k2: KArg2, f: F) -> R @@ -243,10 +255,12 @@ impl storage::StorageDoubleMap for G where KArg2: EncodeLike, F: FnOnce(&mut Option) -> R, { - Self::try_mutate_exists(k1, k2, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate_exists(k1, k2, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } - fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result where + fn try_mutate(k1: KArg1, k2: KArg2, f: F) -> Result + where KArg1: EncodeLike, KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> Result, @@ -283,11 +297,8 @@ impl storage::StorageDoubleMap for G where ret } - fn append( - k1: KArg1, - k2: KArg2, - item: EncodeLikeItem, - ) where + fn append(k1: KArg1, k2: KArg2, item: EncodeLikeItem) + where KArg1: EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -303,7 +314,10 @@ impl storage::StorageDoubleMap for G where OldHasher2: StorageHasher, KeyArg1: EncodeLike, KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option { + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option { let old_key = { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); @@ -311,10 +325,10 @@ impl storage::StorageDoubleMap for G where let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len(), ); final_key.extend_from_slice(&module_prefix_hashed[..]); @@ -331,14 +345,11 @@ impl storage::StorageDoubleMap for G where } } -impl< - K1: FullCodec, - K2: FullCodec, - V: FullCodec, - G: StorageDoubleMap, -> storage::IterableStorageDoubleMap for G where +impl> + storage::IterableStorageDoubleMap for G +where G::Hasher1: ReversibleStorageHasher, - G::Hasher2: ReversibleStorageHasher + G::Hasher2: ReversibleStorageHasher, { type PartialKeyIterator = KeyPrefixIterator; type PrefixIterator = PrefixIterator<(K2, V)>; @@ -367,7 +378,7 @@ impl< closure: |raw_key_without_prefix| { let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); K2::decode(&mut key_material) - } + }, } } @@ -405,7 +416,7 @@ impl< let mut k2_material = G::Hasher2::reverse(k1_k2_material); let k2 = K2::decode(&mut k2_material)?; Ok((k1, k2)) - } + }, } } @@ -418,8 +429,8 @@ impl< fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut 
previous_key = prefix.clone(); - while let Some(next) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix)) + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { previous_key = next; let value = match unhashed::get::(&previous_key) { @@ -458,11 +469,11 @@ impl< /// Test iterators for StorageDoubleMap #[cfg(test)] mod test_iterators { - use codec::{Encode, Decode}; use crate::{ hash::StorageHasher, - storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}, + storage::{generator::StorageDoubleMap, unhashed, IterableStorageDoubleMap}, }; + use codec::{Decode, Encode}; pub trait Config: 'static { type Origin; @@ -521,10 +532,7 @@ mod test_iterators { vec![(3, 3), (0, 0), (2, 2), (1, 1)], ); - assert_eq!( - DoubleMap::iter_values().collect::>(), - vec![3, 0, 2, 1], - ); + assert_eq!(DoubleMap::iter_values().collect::>(), vec![3, 0, 2, 1],); assert_eq!( DoubleMap::drain().collect::>(), @@ -551,15 +559,9 @@ mod test_iterators { vec![(1, 1), (2, 2), (0, 0), (3, 3)], ); - assert_eq!( - DoubleMap::iter_key_prefix(k1).collect::>(), - vec![1, 2, 0, 3], - ); + assert_eq!(DoubleMap::iter_key_prefix(k1).collect::>(), vec![1, 2, 0, 3],); - assert_eq!( - DoubleMap::iter_prefix_values(k1).collect::>(), - vec![1, 2, 0, 3], - ); + assert_eq!(DoubleMap::iter_prefix_values(k1).collect::>(), vec![1, 2, 0, 3],); assert_eq!( DoubleMap::drain_prefix(k1).collect::>(), @@ -580,15 +582,12 @@ mod test_iterators { } // Wrong key1 - unhashed::put( - &[prefix.clone(), vec![1, 2, 3]].concat(), - &3u64.encode() - ); + unhashed::put(&[prefix.clone(), vec![1, 2, 3]].concat(), &3u64.encode()); // Wrong key2 unhashed::put( &[prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode())].concat(), - &3u64.encode() + &3u64.encode(), ); // Wrong value @@ -597,11 +596,12 @@ mod test_iterators { prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode()), crate::Twox64Concat::hash(&2u32.encode()), - ].concat(), + ] + .concat(), &vec![1], ); - DoubleMap::translate(|_k1, _k2, v: u64| Some(v*2)); + DoubleMap::translate(|_k1, _k2, v: u64| Some(v * 2)); assert_eq!( DoubleMap::iter().collect::>(), vec![(3, 3, 6), (0, 0, 0), (2, 2, 4), (1, 1, 2)], diff --git a/substrate/frame/support/src/storage/generator/map.rs b/substrate/frame/support/src/storage/generator/map.rs index e58a001c679fdf6e0f314bc850029a0c648f7788..48593dba17bd2b4648239a210cb6373ee3e31630 100644 --- a/substrate/frame/support/src/storage/generator/map.rs +++ b/substrate/frame/support/src/storage/generator/map.rs @@ -15,14 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(not(feature = "std"))] -use sp_std::prelude::*; -use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike}; use crate::{ - storage::{self, unhashed, KeyPrefixIterator, StorageAppend, PrefixIterator}, - Never, hash::{StorageHasher, Twox128, ReversibleStorageHasher}, + hash::{ReversibleStorageHasher, StorageHasher, Twox128}, + storage::{self, unhashed, KeyPrefixIterator, PrefixIterator, StorageAppend}, + Never, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_std::borrow::Borrow; +#[cfg(not(feature = "std"))] +use sp_std::prelude::*; /// Generator for `StorageMap` used by `decl_storage`. 
/// @@ -54,9 +55,8 @@ pub trait StorageMap { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); + let mut result = + Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); result.extend_from_slice(&module_prefix_hashed[..]); result.extend_from_slice(&storage_prefix_hashed[..]); @@ -71,7 +71,8 @@ pub trait StorageMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the full key used in top storage. - fn storage_map_final_key(key: KeyArg) -> Vec where + fn storage_map_final_key(key: KeyArg) -> Vec + where KeyArg: EncodeLike, { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); @@ -79,7 +80,7 @@ pub trait StorageMap { let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), ); final_key.extend_from_slice(&module_prefix_hashed[..]); @@ -98,11 +99,9 @@ pub struct StorageMapIterator { _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, } -impl< - K: Decode + Sized, - V: Decode + Sized, - Hasher: ReversibleStorageHasher -> Iterator for StorageMapIterator { +impl Iterator + for StorageMapIterator +{ type Item = (K, V); fn next(&mut self) -> Option<(K, V)> { @@ -117,27 +116,25 @@ impl< if self.drain { unhashed::kill(&self.previous_key) } - let mut key_material = Hasher::reverse(&self.previous_key[self.prefix.len()..]); + let mut key_material = + Hasher::reverse(&self.previous_key[self.prefix.len()..]); match K::decode(&mut key_material) { Ok(key) => Some((key, value)), Err(_) => continue, } - } + }, None => continue, } - } + }, None => None, } } } } -impl< - K: FullCodec, - V: FullCodec, - G: StorageMap, -> storage::IterableStorageMap for G where - G::Hasher: ReversibleStorageHasher +impl> storage::IterableStorageMap for G +where + G::Hasher: ReversibleStorageHasher, { type Iterator = PrefixIterator<(K, V)>; type KeyIterator = KeyPrefixIterator; @@ -166,7 +163,7 @@ impl< closure: |raw_key_without_prefix| { let mut key_material = G::Hasher::reverse(raw_key_without_prefix); K::decode(&mut key_material) - } + }, } } @@ -180,8 +177,8 @@ impl< fn translate Option>(mut f: F) { let prefix = G::prefix_hash(); let mut previous_key = prefix.clone(); - while let Some(next) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix)) + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { previous_key = next; let value = match unhashed::get::(&previous_key) { @@ -254,16 +251,21 @@ impl> storage::StorageMap } fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { - Self::try_mutate(key, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + Self::try_mutate(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } - fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R { - Self::try_mutate_exists(key, |v| Ok::(f(v))).expect("`Never` can not be constructed; qed") + fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + Self::try_mutate_exists(key, |v| Ok::(f(v))) + .expect("`Never` can not be constructed; qed") } fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( key: KeyArg, - 
f: F + f: F, ) -> Result { let final_key = Self::storage_map_final_key(key); let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); @@ -280,7 +282,7 @@ impl> storage::StorageMap fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( key: KeyArg, - f: F + f: F, ) -> Result { let final_key = Self::storage_map_final_key(key); let mut val = unhashed::get(final_key.as_ref()); @@ -319,7 +321,9 @@ impl> storage::StorageMap let key_hashed = key.borrow().using_encoded(OldHasher::hash); let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key_hashed.as_ref().len(), ); final_key.extend_from_slice(&module_prefix_hashed[..]); @@ -338,11 +342,11 @@ impl> storage::StorageMap /// Test iterators for StorageMap #[cfg(test)] mod test_iterators { - use codec::{Encode, Decode}; use crate::{ hash::StorageHasher, - storage::{generator::StorageMap, IterableStorageMap, unhashed}, + storage::{generator::StorageMap, unhashed, IterableStorageMap}, }; + use codec::{Decode, Encode}; pub trait Config: 'static { type Origin; @@ -421,7 +425,7 @@ mod test_iterators { &vec![1], ); - Map::translate(|_k1, v: u64| Some(v*2)); + Map::translate(|_k1, v: u64| Some(v * 2)); assert_eq!(Map::iter().collect::>(), vec![(3, 6), (0, 0), (2, 4), (1, 2)]); }) } diff --git a/substrate/frame/support/src/storage/generator/mod.rs b/substrate/frame/support/src/storage/generator/mod.rs index 578831314c1f615be8f891bba1de4294a3784493..86129091b7ef203d9efd1f73943352cf21a875e1 100644 --- a/substrate/frame/support/src/storage/generator/mod.rs +++ b/substrate/frame/support/src/storage/generator/mod.rs @@ -24,23 +24,25 @@ //! //! This is internal api and is subject to change. +mod double_map; mod map; mod nmap; -mod double_map; mod value; +pub use double_map::StorageDoubleMap; pub use map::StorageMap; pub use nmap::StorageNMap; -pub use double_map::StorageDoubleMap; pub use value::StorageValue; #[cfg(test)] #[allow(dead_code)] mod tests { - use sp_io::TestExternalities; + use crate::{ + assert_noop, assert_ok, + storage::{generator::StorageValue, unhashed, IterableStorageMap}, + }; use codec::Encode; - use crate::storage::{unhashed, generator::StorageValue, IterableStorageMap}; - use crate::{assert_noop, assert_ok}; + use sp_io::TestExternalities; struct Runtime; @@ -80,7 +82,7 @@ mod tests { // translate let translate_fn = |old: Option| -> Option<(u64, u64)> { - old.map(|o| (o.into(), (o*2).into())) + old.map(|o| (o.into(), (o * 2).into())) }; let res = Value::translate(translate_fn); debug_assert!(res.is_ok()); @@ -105,11 +107,16 @@ mod tests { ); // do translation. 
- NumberMap::translate(|k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }); + NumberMap::translate( + |k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }, + ); assert_eq!( NumberMap::iter().collect::>(), - (0..50u32).map(|x| x * 2).map(|x| (x, (x as u64) << 32 | x as u64)).collect::>(), + (0..50u32) + .map(|x| x * 2) + .map(|x| (x, (x as u64) << 32 | x as u64)) + .collect::>(), ); }) } @@ -123,20 +130,29 @@ mod tests { assert_eq!(DoubleMap::get(0, 0), 0); // `assert_noop` ensures that the state does not change - assert_noop!(Value::try_mutate(|value| -> Result<(), &'static str> { - *value = (2, 2); - Err("don't change value") - }), "don't change value"); + assert_noop!( + Value::try_mutate(|value| -> Result<(), &'static str> { + *value = (2, 2); + Err("don't change value") + }), + "don't change value" + ); - assert_noop!(NumberMap::try_mutate(0, |value| -> Result<(), &'static str> { - *value = 4; - Err("don't change value") - }), "don't change value"); + assert_noop!( + NumberMap::try_mutate(0, |value| -> Result<(), &'static str> { + *value = 4; + Err("don't change value") + }), + "don't change value" + ); - assert_noop!(DoubleMap::try_mutate(0, 0, |value| -> Result<(), &'static str> { - *value = 6; - Err("don't change value") - }), "don't change value"); + assert_noop!( + DoubleMap::try_mutate(0, 0, |value| -> Result<(), &'static str> { + *value = 6; + Err("don't change value") + }), + "don't change value" + ); // Showing this explicitly for clarity assert_eq!(Value::get(), (0, 0)); diff --git a/substrate/frame/support/src/storage/generator/nmap.rs b/substrate/frame/support/src/storage/generator/nmap.rs index 49c8c94ea7a941ee5274fb3b30e537b267fb6529..54824c62048cdc79cc6821ddfa5b8eca46f1e3e4 100755 --- a/substrate/frame/support/src/storage/generator/nmap.rs +++ b/substrate/frame/support/src/storage/generator/nmap.rs @@ -228,7 +228,7 @@ where fn try_mutate(key: KArg, f: F) -> Result where KArg: EncodeLikeTuple + TupleToEncodedIter, - F: FnOnce(&mut Self::Query) -> Result + F: FnOnce(&mut Self::Query) -> Result, { let final_key = Self::storage_n_map_final_key::(key); let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); @@ -373,7 +373,7 @@ impl> closure: |raw_key_without_prefix| { let (final_key, _) = K::decode_final_key(raw_key_without_prefix)?; Ok(final_key) - } + }, } } @@ -394,16 +394,16 @@ impl> Some(value) => value, None => { log::error!("Invalid translate: fail to decode old value"); - continue; - } + continue + }, }; let final_key = match K::decode_final_key(&previous_key[prefix.len()..]) { Ok((final_key, _)) => final_key, Err(_) => { log::error!("Invalid translate: fail to decode key"); - continue; - } + continue + }, }; match f(final_key, value) { @@ -452,10 +452,7 @@ mod test_iterators { fn key_after_prefix(mut prefix: Vec) -> Vec { let last = prefix.iter_mut().last().unwrap(); - assert!( - *last != 255, - "mock function not implemented for this prefix" - ); + assert!(*last != 255, "mock function not implemented for this prefix"); *last += 1; prefix } @@ -498,10 +495,7 @@ mod test_iterators { vec![((3, 3), 3), ((0, 0), 0), ((2, 2), 2), ((1, 1), 1)], ); - assert_eq!( - NMap::iter_keys().collect::>(), - vec![(3, 3), (0, 0), (2, 2), (1, 1)], - ); + assert_eq!(NMap::iter_keys().collect::>(), vec![(3, 3), (0, 0), (2, 2), (1, 1)],); assert_eq!(NMap::iter_values().collect::>(), vec![3, 0, 2, 1],); @@ -511,10 +505,7 @@ mod test_iterators { ); assert_eq!(NMap::iter().collect::>(), vec![]); - assert_eq!( - 
unhashed::get(&key_before_prefix(prefix.clone())), - Some(1u64) - ); + assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); // Prefix iterator @@ -533,15 +524,9 @@ mod test_iterators { vec![(1, 1), (2, 2), (0, 0), (3, 3)], ); - assert_eq!( - NMap::iter_key_prefix((k1,)).collect::>(), - vec![1, 2, 0, 3], - ); + assert_eq!(NMap::iter_key_prefix((k1,)).collect::>(), vec![1, 2, 0, 3],); - assert_eq!( - NMap::iter_prefix_values((k1,)).collect::>(), - vec![1, 2, 0, 3], - ); + assert_eq!(NMap::iter_prefix_values((k1,)).collect::>(), vec![1, 2, 0, 3],); assert_eq!( NMap::drain_prefix((k1,)).collect::>(), @@ -549,10 +534,7 @@ mod test_iterators { ); assert_eq!(NMap::iter_prefix((k1,)).collect::>(), vec![]); - assert_eq!( - unhashed::get(&key_before_prefix(prefix.clone())), - Some(1u64) - ); + assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); // Translate @@ -569,11 +551,7 @@ mod test_iterators { // Wrong key2 unhashed::put( - &[ - prefix.clone(), - crate::Blake2_128Concat::hash(&1u16.encode()), - ] - .concat(), + &[prefix.clone(), crate::Blake2_128Concat::hash(&1u16.encode())].concat(), &3u64.encode(), ); diff --git a/substrate/frame/support/src/storage/generator/value.rs b/substrate/frame/support/src/storage/generator/value.rs index e07c952320aa51f8f41328073c98f4f24cfa9301..c765e059ec149a83bab1b726bd5714bf358038c9 100644 --- a/substrate/frame/support/src/storage/generator/value.rs +++ b/substrate/frame/support/src/storage/generator/value.rs @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{FullCodec, Encode, EncodeLike, Decode}; use crate::{ - Never, + hash::{StorageHasher, Twox128}, storage::{self, unhashed, StorageAppend}, - hash::{Twox128, StorageHasher}, + Never, }; +use codec::{Decode, Encode, EncodeLike, FullCodec}; /// Generator for `StorageValue` used by `decl_storage`. /// @@ -78,7 +78,8 @@ impl> storage::StorageValue for G { // attempt to get the length directly. let maybe_old = unhashed::get_raw(&key) - .map(|old_data| O::decode(&mut &old_data[..]).map_err(|_| ())).transpose()?; + .map(|old_data| O::decode(&mut &old_data[..]).map_err(|_| ())) + .transpose()?; let maybe_new = f(maybe_old); if let Some(new) = maybe_new.as_ref() { new.using_encoded(|d| unhashed::put_raw(&key, d)); diff --git a/substrate/frame/support/src/storage/hashed.rs b/substrate/frame/support/src/storage/hashed.rs index a0c9ab6708e7fc82cf8ba5dc55e1627a2da1563b..241caff809b3dd64b0047852856ef565514556c0 100644 --- a/substrate/frame/support/src/storage/hashed.rs +++ b/substrate/frame/support/src/storage/hashed.rs @@ -18,8 +18,8 @@ //! Operation on runtime storage using hashed keys. use super::unhashed; +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(hash: &HashFn, key: &[u8]) -> Option diff --git a/substrate/frame/support/src/storage/migration.rs b/substrate/frame/support/src/storage/migration.rs index 62db2eff839fb4757f2cee6ca72bcdd5372ae0dd..701b2627f31c41975eee1a21043b01a6f780bf48 100644 --- a/substrate/frame/support/src/storage/migration.rs +++ b/substrate/frame/support/src/storage/migration.rs @@ -17,10 +17,9 @@ //! Some utilities for helping access storage with arbitrary key types. 
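The `translate` body reformatted in the `generator/value.rs` hunk above reads the old raw bytes, decodes them as the caller-supplied `O`, and re-encodes whatever the closure returns, which makes one-off value migrations cheap. A sketch against a hypothetical storage value (the alias and prefix are illustrative):

use frame_support::{
	storage::{types::StorageValue, StorageValue as _},
	traits::StorageInstance,
};

struct CounterPrefix;
impl StorageInstance for CounterPrefix {
	fn pallet_prefix() -> &'static str { "Example" }
	const STORAGE_PREFIX: &'static str = "Counter";
}
type Counter = StorageValue<CounterPrefix, (u64, u64)>;

fn migrate_counter_sketch() {
	// Old layout stored a `u32`; the new layout stores a pair of `u64`s.
	let res = Counter::translate::<u32, _>(|old| old.map(|o| (o as u64, (o as u64) * 2)));
	// `Err(())` signals that an old value existed but failed to decode as `u32`.
	debug_assert!(res.is_ok());
}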
+use crate::{hash::ReversibleStorageHasher, storage::unhashed, StorageHasher, Twox128}; +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; -use crate::{StorageHasher, Twox128, storage::unhashed}; -use crate::hash::ReversibleStorageHasher; use super::PrefixIterator; @@ -34,14 +33,18 @@ pub struct StorageIterator { impl StorageIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated(note="Please use the storage_iter or storage_iter_with_suffix functions instead")] + #[deprecated( + note = "Please use the storage_iter or storage_iter_with_suffix functions instead" + )] pub fn new(module: &[u8], item: &[u8]) -> Self { #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated(note="Please use the storage_iter or storage_iter_with_suffix functions instead")] + #[deprecated( + note = "Please use the storage_iter or storage_iter_with_suffix functions instead" + )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); prefix.extend_from_slice(&Twox128::hash(module)); @@ -75,10 +78,10 @@ impl Iterator for StorageIterator { frame_support::storage::unhashed::kill(&next); } Some((self.previous_key[self.prefix.len()..].to_vec(), value)) - } + }, None => continue, } - } + }, None => None, } } @@ -95,14 +98,18 @@ pub struct StorageKeyIterator { impl StorageKeyIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. - #[deprecated(note="Please use the storage_key_iter or storage_key_iter_with_suffix functions instead")] + #[deprecated( + note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" + )] pub fn new(module: &[u8], item: &[u8]) -> Self { #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. 
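The `#[deprecated]` notes reformatted above point at the free functions `storage_iter`/`storage_key_iter` as replacements. A minimal sketch of the replacement API, as used to walk a map whose storage type is no longer in scope during a migration (pallet and item names are illustrative):

use frame_support::{storage::migration::storage_key_iter, Twox64Concat};

fn iterate_foreign_map_sketch() {
	// Walk a `Twox64Concat` map `FooMap: u64 => u64` of pallet `MyOldPallet`
	// purely by its raw prefix, decoding keys and values on the fly.
	for (key, value) in storage_key_iter::<u64, u64, Twox64Concat>(b"MyOldPallet", b"FooMap") {
		let _ = (key, value);
	}
}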
- #[deprecated(note="Please use the storage_key_iter or storage_key_iter_with_suffix functions instead")] + #[deprecated( + note = "Please use the storage_key_iter or storage_key_iter_with_suffix functions instead" + )] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); prefix.extend_from_slice(&Twox128::hash(module)); @@ -141,13 +148,13 @@ impl Iterator frame_support::storage::unhashed::kill(&next); } Some((key, value)) - } + }, None => continue, } - } + }, Err(_) => continue, } - } + }, None => None, } } @@ -187,7 +194,11 @@ pub fn storage_key_iter( +pub fn storage_key_iter_with_suffix< + K: Decode + Sized, + T: Decode + Sized, + H: ReversibleStorageHasher, +>( module: &[u8], item: &[u8], suffix: &[u8], @@ -279,7 +290,7 @@ pub fn take_storage_item pub fn move_storage_from_pallet( storage_name: &[u8], old_pallet_name: &[u8], - new_pallet_name: &[u8] + new_pallet_name: &[u8], ) { let mut new_prefix = Vec::new(); new_prefix.extend_from_slice(&Twox128::hash(new_pallet_name)); @@ -347,18 +358,14 @@ pub fn move_prefix(from_prefix: &[u8], to_prefix: &[u8]) { #[cfg(test)] mod tests { + use super::{ + move_pallet, move_prefix, move_storage_from_pallet, storage_iter, storage_key_iter, + }; use crate::{ - pallet_prelude::{StorageValue, StorageMap, Twox64Concat, Twox128}, hash::StorageHasher, + pallet_prelude::{StorageMap, StorageValue, Twox128, Twox64Concat}, }; use sp_io::TestExternalities; - use super::{ - move_prefix, - move_pallet, - move_storage_from_pallet, - storage_iter, - storage_key_iter, - }; struct OldPalletStorageValuePrefix; impl frame_support::traits::StorageInstance for OldPalletStorageValuePrefix { @@ -459,21 +466,22 @@ mod tests { OldStorageMap::insert(3, 4); assert_eq!( - storage_key_iter::(b"my_old_pallet", b"foo_map").collect::>(), + storage_key_iter::(b"my_old_pallet", b"foo_map") + .collect::>(), vec![(1, 2), (3, 4)], ); assert_eq!( - storage_iter(b"my_old_pallet", b"foo_map").drain().map(|t| t.1).collect::>(), + storage_iter(b"my_old_pallet", b"foo_map") + .drain() + .map(|t| t.1) + .collect::>(), vec![2, 4], ); assert_eq!(OldStorageMap::iter().collect::>(), vec![]); // Empty because storage iterator skips over the entry under the first key - assert_eq!( - storage_iter::(b"my_old_pallet", b"foo_value").drain().next(), - None - ); + assert_eq!(storage_iter::(b"my_old_pallet", b"foo_value").drain().next(), None); assert_eq!(OldStorageValue::get(), Some(3)); }); } diff --git a/substrate/frame/support/src/storage/mod.rs b/substrate/frame/support/src/storage/mod.rs index 65bd9af6c498b8b1874f928f9ae63a646ae77caf..867935003080cdb153266e1831ebfd84a938124c 100644 --- a/substrate/frame/support/src/storage/mod.rs +++ b/substrate/frame/support/src/storage/mod.rs @@ -17,31 +17,31 @@ //! Stuff to do with the runtime's storage. 
-use sp_core::storage::ChildInfo; -use sp_std::prelude::*; -use codec::{FullCodec, FullEncode, Encode, EncodeLike, Decode}; use crate::{ - hash::{Twox128, StorageHasher, ReversibleStorageHasher}, + hash::{ReversibleStorageHasher, StorageHasher, Twox128}, storage::types::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, KeyGenerator, ReversibleKeyGenerator, TupleToEncodedIter, }, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, FullEncode}; +use sp_core::storage::ChildInfo; use sp_runtime::generic::{Digest, DigestItem}; pub use sp_runtime::TransactionOutcome; +use sp_std::prelude::*; pub use types::Key; -pub mod unhashed; -pub mod hashed; pub mod bounded_btree_map; pub mod bounded_btree_set; pub mod bounded_vec; -pub mod weak_bounded_vec; pub mod child; #[doc(hidden)] pub mod generator; +pub mod hashed; pub mod migration; pub mod types; +pub mod unhashed; +pub mod weak_bounded_vec; #[cfg(all(feature = "std", any(test, debug_assertions)))] mod debug_helper { @@ -101,9 +101,7 @@ pub fn require_transaction() { /// /// Transactions can be nested to any depth. Commits happen to the parent transaction. pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { - use sp_io::storage::{ - start_transaction, commit_transaction, rollback_transaction, - }; + use sp_io::storage::{commit_transaction, rollback_transaction, start_transaction}; use TransactionOutcome::*; start_transaction(); @@ -112,8 +110,14 @@ pub fn with_transaction(f: impl FnOnce() -> TransactionOutcome) -> R { let _guard = debug_helper::inc_transaction_level(); match f() { - Commit(res) => { commit_transaction(); res }, - Rollback(res) => { rollback_transaction(); res }, + Commit(res) => { + commit_transaction(); + res + }, + Rollback(res) => { + rollback_transaction(); + res + }, } } @@ -205,7 +209,10 @@ pub trait StorageValue { /// /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. - fn decode_len() -> Option where T: StorageDecodeLength { + fn decode_len() -> Option + where + T: StorageDecodeLength, + { T::decode_len(&Self::hashed_key()) } } @@ -252,7 +259,10 @@ pub trait StorageMap { /// Mutate the value under a key. /// /// Deletes the item if mutated to a `None`. - fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R; + fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R; /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( @@ -292,7 +302,8 @@ pub trait StorageMap { /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. fn decode_len>(key: KeyArg) -> Option - where V: StorageDecodeLength, + where + V: StorageDecodeLength, { V::decode_len(&Self::hashed_key_for(key)) } @@ -337,11 +348,9 @@ pub trait IterableStorageMap: StorageMap { } /// A strongly-typed double map in storage whose secondary keys and values can be iterated over. -pub trait IterableStorageDoubleMap< - K1: FullCodec, - K2: FullCodec, - V: FullCodec ->: StorageDoubleMap { +pub trait IterableStorageDoubleMap: + StorageDoubleMap +{ /// The type that iterates over all `key2`. type PartialKeyIterator: Iterator; @@ -401,19 +410,22 @@ pub trait IterableStorageNMap: StorageN /// remove values whose prefix is `kp` to the map while doing this, you'll get undefined /// results. 
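The `with_transaction` match arms expanded above are the whole transactional contract: `Commit` keeps the writes made inside the closure, `Rollback` discards them, and nested transactions commit into their parent. A minimal sketch, assuming it runs inside externalities:

use frame_support::storage::{unhashed, with_transaction};
use sp_runtime::TransactionOutcome::{Commit, Rollback};

fn transaction_sketch() {
	unhashed::put(b"k", &1u32);

	with_transaction(|| {
		unhashed::put(b"k", &2u32);
		Rollback(()) // discard the write above
	});
	assert_eq!(unhashed::get::<u32>(b"k"), Some(1));

	with_transaction(|| {
		unhashed::put(b"k", &3u32);
		Commit(()) // keep the write above
	});
	assert_eq!(unhashed::get::<u32>(b"k"), Some(3));
}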
fn iter_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> - where K: HasReversibleKeyPrefix; + where + K: HasReversibleKeyPrefix; /// Enumerate all suffix keys in the map with prefix key `kp` in no particular order. If you /// add or remove values whose prefix is `kp` to the map while doing this, you'll get undefined /// results. fn iter_key_prefix(kp: KP) -> KeyPrefixIterator<>::Suffix> - where K: HasReversibleKeyPrefix; + where + K: HasReversibleKeyPrefix; /// Remove all elements from the map with prefix key `kp` and iterate through them in no /// particular order. If you add elements with prefix key `kp` to the map while doing this, /// you'll get undefined results. fn drain_prefix(kp: KP) -> PrefixIterator<(>::Suffix, V)> - where K: HasReversibleKeyPrefix; + where + K: HasReversibleKeyPrefix; /// Enumerate all elements in the map in no particular order. If you add or remove values to /// the map while doing this, you'll get undefined results. @@ -499,11 +511,13 @@ pub trait StorageDoubleMap { /// Remove all values under the first key. fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult - where KArg1: ?Sized + EncodeLike; + where + KArg1: ?Sized + EncodeLike; /// Iterate over values that share the first key. fn iter_prefix_values(k1: KArg1) -> PrefixIterator - where KArg1: ?Sized + EncodeLike; + where + KArg1: ?Sized + EncodeLike; /// Mutate the value under the given keys. fn mutate(k1: KArg1, k2: KArg2, f: F) -> R @@ -542,11 +556,8 @@ pub trait StorageDoubleMap { /// If the storage item is not encoded properly, the storage will be overwritten /// and set to `[item]`. Any default value set for the storage item will be ignored /// on overwrite. - fn append( - k1: KArg1, - k2: KArg2, - item: EncodeLikeItem, - ) where + fn append(k1: KArg1, k2: KArg2, item: EncodeLikeItem) + where KArg1: EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -566,10 +577,10 @@ pub trait StorageDoubleMap { /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. fn decode_len(key1: KArg1, key2: KArg2) -> Option - where - KArg1: EncodeLike, - KArg2: EncodeLike, - V: StorageDecodeLength, + where + KArg1: EncodeLike, + KArg2: EncodeLike, + V: StorageDecodeLength, { V::decode_len(&Self::hashed_key_for(key1, key2)) } @@ -583,7 +594,10 @@ pub trait StorageDoubleMap { OldHasher2: StorageHasher, KeyArg1: EncodeLike, KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option; + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option; } /// An implementation of a map with an arbitrary number of keys. @@ -625,10 +639,13 @@ pub trait StorageNMap { /// Remove all values under the partial prefix key. fn remove_prefix(partial_key: KP, limit: Option) -> sp_io::KillStorageResult - where K: HasKeyPrefix; + where + K: HasKeyPrefix; /// Iterate over values that share the partial prefix key. - fn iter_prefix_values(partial_key: KP) -> PrefixIterator where K: HasKeyPrefix; + fn iter_prefix_values(partial_key: KP) -> PrefixIterator + where + K: HasKeyPrefix; /// Mutate the value under a key. 
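The `decode_len` methods threaded through these traits read only the SCALE `Compact` length prefix of a stored `Vec`-like value, so a length check costs a few bytes of storage I/O instead of a full decode. A sketch against a hypothetical map; note that `None` means no explicit entry exists, not that the value is empty:

use frame_support::{storage::types::StorageMap, traits::StorageInstance, Twox64Concat};

struct VotesPrefix;
impl StorageInstance for VotesPrefix {
	fn pallet_prefix() -> &'static str { "Example" }
	const STORAGE_PREFIX: &'static str = "Votes";
}
type Votes = StorageMap<VotesPrefix, Twox64Concat, u32, Vec<u64>>;

fn can_accept_more_votes(proposal: u32, max: usize) -> bool {
	Votes::decode_len(proposal).map_or(true, |len| len < max)
}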
fn mutate(key: KArg, f: F) -> R @@ -741,7 +758,7 @@ impl Iterator for PrefixIterator { self.previous_key, ); continue - } + }, }; if self.drain { unhashed::kill(&self.previous_key) @@ -756,11 +773,11 @@ impl Iterator for PrefixIterator { e, ); continue - } + }, }; Some(item) - } + }, None => None, } } @@ -807,12 +824,12 @@ impl Iterator for KeyPrefixIterator { Ok(item) => return Some(item), Err(e) => { log::error!("key failed to decode at {:?}: {:?}", self.previous_key, e); - continue; - } + continue + }, } } - return None; + return None } } } @@ -871,7 +888,10 @@ impl ChildTriePrefixIterator<(K, T)> { /// Construct iterator to iterate over child trie items in `child_info` with the prefix `prefix`. /// /// NOTE: Iterator with [`Self::drain`] will remove any key or value who failed to decode - pub fn with_prefix_over_key(child_info: &ChildInfo, prefix: &[u8]) -> Self { + pub fn with_prefix_over_key( + child_info: &ChildInfo, + prefix: &[u8], + ) -> Self { let prefix = prefix.to_vec(); let previous_key = prefix.clone(); let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { @@ -888,7 +908,7 @@ impl ChildTriePrefixIterator<(K, T)> { drain: false, fetch_previous_key: true, closure, - } + } } } @@ -905,7 +925,7 @@ impl Iterator for ChildTriePrefixIterator { &self.child_info.storage_key(), &self.previous_key, ) - .filter(|n| n.starts_with(&self.prefix)) + .filter(|n| n.starts_with(&self.prefix)) }; break match maybe_next { Some(next) => { @@ -918,7 +938,7 @@ impl Iterator for ChildTriePrefixIterator { self.previous_key, ); continue - } + }, }; if self.drain { child::kill(&self.child_info, &self.previous_key) @@ -933,11 +953,11 @@ impl Iterator for ChildTriePrefixIterator { e, ); continue - } + }, }; Some(item) - } + }, None => None, } } @@ -999,8 +1019,8 @@ pub trait StoragePrefixedMap { fn translate_values Option>(mut f: F) { let prefix = Self::final_prefix(); let mut previous_key = prefix.clone().to_vec(); - while let Some(next) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix)) + while let Some(next) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { previous_key = next; let maybe_value = unhashed::get::(&previous_key); @@ -1010,10 +1030,7 @@ pub trait StoragePrefixedMap { None => unhashed::kill(&previous_key), }, None => { - log::error!( - "old key failed to decode at {:?}", - previous_key, - ); + log::error!("old key failed to decode at {:?}", previous_key,); continue }, } @@ -1218,13 +1235,13 @@ where #[cfg(test)] mod test { use super::*; + use crate::{assert_ok, hash::Identity}; + use bounded_vec::BoundedVec; + use core::convert::{TryFrom, TryInto}; + use generator::StorageValue as _; use sp_core::hashing::twox_128; - use crate::{hash::Identity, assert_ok}; use sp_io::TestExternalities; - use generator::StorageValue as _; - use bounded_vec::BoundedVec; use weak_bounded_vec::WeakBoundedVec; - use core::convert::{TryFrom, TryInto}; #[test] fn prefixed_map_works() { @@ -1363,8 +1380,7 @@ mod test { #[test] fn key_prefix_iterator_works() { TestExternalities::default().execute_with(|| { - use crate::storage::generator::StorageMap; - use crate::hash::Twox64Concat; + use crate::{hash::Twox64Concat, storage::generator::StorageMap}; struct MyStorageMap; impl StorageMap for MyStorageMap { type Query = u64; @@ -1426,30 +1442,21 @@ mod test { assert_eq!( ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) .collect::, u16)>>(), - vec![ - (vec![], 8), - (vec![2, 3], 8), - ], + vec![(vec![], 8), (vec![2, 3], 8),], ); assert_eq!( 
ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) .drain() .collect::, u16)>>(), - vec![ - (vec![], 8), - (vec![2, 3], 8), - ], + vec![(vec![], 8), (vec![2, 3], 8),], ); // The only remaining is the ones outside prefix assert_eq!( ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) .collect::, u8)>>(), - vec![ - (vec![1, 2, 3], 8), - (vec![3], 8), - ], + vec![(vec![1, 2, 3], 8), (vec![3], 8),], ); child::put(&child_info_a, &[1, 2, 3], &8u16); @@ -1461,28 +1468,21 @@ mod test { assert_eq!( ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) .collect::>(), - vec![ - (u16::decode(&mut &[2, 3][..]).unwrap(), 8), - ], + vec![(u16::decode(&mut &[2, 3][..]).unwrap(), 8),], ); assert_eq!( ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) .drain() .collect::>(), - vec![ - (u16::decode(&mut &[2, 3][..]).unwrap(), 8), - ], + vec![(u16::decode(&mut &[2, 3][..]).unwrap(), 8),], ); // The only remaining is the ones outside prefix assert_eq!( ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) .collect::, u8)>>(), - vec![ - (vec![1, 2, 3], 8), - (vec![3], 8), - ], + vec![(vec![1, 2, 3], 8), (vec![3], 8),], ); }); } diff --git a/substrate/frame/support/src/storage/types/double_map.rs b/substrate/frame/support/src/storage/types/double_map.rs index 5143967d8c978c5016623486c4fafc198a44a7e9..1704f8a647cb6e4e8934bc39bc18ad586161075c 100644 --- a/substrate/frame/support/src/storage/types/double_map.rs +++ b/substrate/frame/support/src/storage/types/double_map.rs @@ -18,14 +18,14 @@ //! Storage map type. Implements StorageDoubleMap, StorageIterableDoubleMap, //! StoragePrefixedDoubleMap traits and their methods directly. -use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use crate::{ storage::{ - StorageAppend, StorageTryAppend, StorageDecodeLength, StoragePrefixedMap, - types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, + types::{OnEmptyGetter, OptionQuery, QueryKindTrait}, + StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, - traits::{GetDefault, StorageInstance, Get, StorageInfo}, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; use sp_arithmetic::traits::SaturatedConversion; use sp_std::prelude::*; @@ -36,9 +36,9 @@ use sp_std::prelude::*; /// Each value is stored at: /// ```nocompile /// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) /// ``` /// /// # Warning @@ -53,18 +53,26 @@ pub struct StorageDoubleMap< Hasher2, Key2, Value, - QueryKind=OptionQuery, - OnEmpty=GetDefault, - MaxValues=GetDefault, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, >( - core::marker::PhantomData< - (Prefix, Hasher1, Key1, Hasher2, Key2, Value, QueryKind, OnEmpty, MaxValues) - > + core::marker::PhantomData<( + Prefix, + Hasher1, + Key1, + Hasher2, + Key2, + Value, + QueryKind, + OnEmpty, + MaxValues, + )>, ); impl - crate::storage::generator::StorageDoubleMap for - StorageDoubleMap + crate::storage::generator::StorageDoubleMap + for StorageDoubleMap where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, @@ -94,8 +102,8 @@ where } impl - StoragePrefixedMap for - StorageDoubleMap + StoragePrefixedMap + for StorageDoubleMap where Prefix: StorageInstance, Hasher1: 
crate::hash::StorageHasher, @@ -161,7 +169,8 @@ where pub fn try_get(k1: KArg1, k2: KArg2) -> Result where KArg1: EncodeLike, - KArg2: EncodeLike { + KArg2: EncodeLike, + { >::try_get(k1, k2) } @@ -175,8 +184,12 @@ where } /// Swap the values of two key-pairs. - pub fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) - where + pub fn swap( + x_k1: XKArg1, + x_k2: XKArg2, + y_k1: YKArg1, + y_k2: YKArg2, + ) where XKArg1: EncodeLike, XKArg2: EncodeLike, YKArg1: EncodeLike, @@ -206,13 +219,16 @@ where /// Remove all values under the first key. pub fn remove_prefix(k1: KArg1, limit: Option) -> sp_io::KillStorageResult - where KArg1: ?Sized + EncodeLike { + where + KArg1: ?Sized + EncodeLike, + { >::remove_prefix(k1, limit) } /// Iterate over values that share the first key. pub fn iter_prefix_values(k1: KArg1) -> crate::storage::PrefixIterator - where KArg1: ?Sized + EncodeLike + where + KArg1: ?Sized + EncodeLike, { >::iter_prefix_values(k1) } @@ -266,11 +282,8 @@ where /// If the storage item is not encoded properly, the storage will be overwritten /// and set to `[item]`. Any default value set for the storage item will be ignored /// on overwrite. - pub fn append( - k1: KArg1, - k2: KArg2, - item: EncodeLikeItem, - ) where + pub fn append(k1: KArg1, k2: KArg2, item: EncodeLikeItem) + where KArg1: EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -310,10 +323,16 @@ where OldHasher2: crate::StorageHasher, KeyArg1: EncodeLike, KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option { - < - Self as crate::storage::StorageDoubleMap - >::migrate_keys::(key1, key2) + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option { + >::migrate_keys::< + OldHasher1, + OldHasher2, + _, + _, + >(key1, key2) } /// Remove all value of the storage. @@ -360,9 +379,9 @@ where EncodeLikeItem: EncodeLike, Value: StorageTryAppend, { - < - Self as crate::storage::TryAppendDoubleMap - >::try_append(key1, key2, item) + >::try_append( + key1, key2, item, + ) } } @@ -401,7 +420,9 @@ where /// /// If you add elements with first key `k1` to the map while doing this, you'll get undefined /// results. 
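`append` and `try_append`, compacted in the hunks above, extend the stored SCALE-encoded list in place without decoding the existing elements; if the stored bytes fail to decode as an appendable value, `append` overwrites them with `[item]`, while `try_append` additionally honours a `StorageTryAppend` capacity bound (e.g. a `BoundedVec` limit). A sketch against a hypothetical double map, assuming externalities:

use frame_support::{
	storage::types::StorageDoubleMap, traits::StorageInstance, Blake2_128Concat, Twox64Concat,
};

struct ApprovalsPrefix;
impl StorageInstance for ApprovalsPrefix {
	fn pallet_prefix() -> &'static str { "Example" }
	const STORAGE_PREFIX: &'static str = "Approvals";
}
type Approvals =
	StorageDoubleMap<ApprovalsPrefix, Blake2_128Concat, u16, Twox64Concat, u8, Vec<u32>>;

fn append_sketch() {
	// Only the length prefix is rewritten and the new item encoded at the
	// end; the existing `Vec<u32>` is never decoded.
	Approvals::append(1u16, 2u8, 99u32);
	Approvals::append(1u16, 2u8, 100u32);
	assert_eq!(Approvals::get(1u16, 2u8), Some(vec![99, 100]));
}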
- pub fn drain_prefix(k1: impl EncodeLike) -> crate::storage::PrefixIterator<(Key2, Value)> { + pub fn drain_prefix( + k1: impl EncodeLike, + ) -> crate::storage::PrefixIterator<(Key2, Value)> { >::drain_prefix(k1) } @@ -448,8 +469,8 @@ pub trait StorageDoubleMapMetadata { } impl - StorageDoubleMapMetadata for - StorageDoubleMap + StorageDoubleMapMetadata + for StorageDoubleMap where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, @@ -470,8 +491,8 @@ where } impl - crate::traits::StorageInfoTrait for - StorageDoubleMap + crate::traits::StorageInfoTrait + for StorageDoubleMap where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, @@ -484,27 +505,25 @@ where MaxValues: Get>, { fn storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: Some( - Hasher1::max_len::() - .saturating_add(Hasher2::max_len::()) - .saturating_add(Value::max_encoded_len()) - .saturated_into(), - ), - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Hasher1::max_len::() + .saturating_add(Hasher2::max_len::()) + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] } } /// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. impl - crate::traits::PartialStorageInfoTrait for - StorageDoubleMap + crate::traits::PartialStorageInfoTrait + for StorageDoubleMap where Prefix: StorageInstance, Hasher1: crate::hash::StorageHasher, @@ -517,29 +536,28 @@ where MaxValues: Get>, { fn partial_storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: None - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] } } #[cfg(test)] mod test { use super::*; - use sp_io::{TestExternalities, hashing::twox_128}; - use crate::hash::*; - use crate::storage::types::ValueQuery; + use crate::{hash::*, storage::types::ValueQuery}; use frame_metadata::StorageEntryModifier; + use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; impl StorageInstance for Prefix { - fn pallet_prefix() -> &'static str { "test" } + fn pallet_prefix() -> &'static str { + "test" + } const STORAGE_PREFIX: &'static str = "foo"; } @@ -552,11 +570,17 @@ mod test { #[test] fn test() { - type A = StorageDoubleMap< - Prefix, Blake2_128Concat, u16, Twox64Concat, u8, u32, OptionQuery - >; + type A = + StorageDoubleMap; type AValueQueryWithAnOnEmpty = StorageDoubleMap< - Prefix, Blake2_128Concat, u16, Twox64Concat, u8, u32, ValueQuery, ADefault + Prefix, + Blake2_128Concat, + u16, + Twox64Concat, + u8, + u32, + ValueQuery, + ADefault, >; type B = StorageDoubleMap; type C = StorageDoubleMap; @@ -598,17 +622,20 @@ mod test { A::remove(2, 20); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { - *v = *v * 2; Ok(()) + *v = *v * 2; + Ok(()) }); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { - *v = *v * 2; Ok(()) + *v = *v * 2; + Ok(()) }); assert_eq!(A::contains_key(2, 20), true); 
assert_eq!(A::get(2, 20), Some(97 * 4)); A::remove(2, 20); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, 20, |v| { - *v = *v * 2; Err(()) + *v = *v * 2; + Err(()) }); assert_eq!(A::contains_key(2, 20), false); @@ -647,7 +674,6 @@ mod test { assert_eq!(A::contains_key(2, 20), true); assert_eq!(A::get(2, 20), Some(100)); - A::insert(2, 20, 10); assert_eq!(A::take(2, 20), Some(10)); assert_eq!(A::contains_key(2, 20), false); @@ -672,7 +698,7 @@ mod test { C::insert(3, 30, 10); C::insert(4, 40, 10); - A::translate_values::(|v| Some((v * 2).into())); + A::translate_values::(|v| Some((v * 2).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40, 20), (3, 30, 20)]); A::insert(3, 30, 10); @@ -683,7 +709,7 @@ mod test { C::insert(3, 30, 10); C::insert(4, 40, 10); - A::translate::(|k1, k2, v| Some((k1 * k2 as u16 * v as u16).into())); + A::translate::(|k1, k2, v| Some((k1 * k2 as u16 * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40, 1600), (3, 30, 900)]); assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); diff --git a/substrate/frame/support/src/storage/types/key.rs b/substrate/frame/support/src/storage/types/key.rs index cafb501f9e4182d90bebc41336207d03d9dadbdd..a8cdb4546a6f8314259d074921ced3ef9b39a7bd 100755 --- a/substrate/frame/support/src/storage/types/key.rs +++ b/substrate/frame/support/src/storage/types/key.rs @@ -75,24 +75,16 @@ impl KeyGenerator for Key { const HASHER_METADATA: &'static [frame_metadata::StorageHasher] = &[H::METADATA]; fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { - H::hash( - &key.to_encoded_iter() - .next() - .expect("should have at least one element!"), - ) - .as_ref() - .to_vec() + H::hash(&key.to_encoded_iter().next().expect("should have at least one element!")) + .as_ref() + .to_vec() } fn migrate_key + TupleToEncodedIter>( key: &KArg, hash_fns: Self::HArg, ) -> Vec { - (hash_fns.0)( - &key.to_encoded_iter() - .next() - .expect("should have at least one element!"), - ) + (hash_fns.0)(&key.to_encoded_iter().next().expect("should have at least one element!")) } } @@ -118,9 +110,8 @@ impl KeyGenerator for Tuple { for_tuples!( type HArg = ( #(Tuple::HashFn),* ); ); type HashFn = Box Vec>; - const HASHER_METADATA: &'static [frame_metadata::StorageHasher] = &[ - for_tuples!( #(Tuple::Hasher::METADATA),* ) - ]; + const HASHER_METADATA: &'static [frame_metadata::StorageHasher] = + &[for_tuples!( #(Tuple::Hasher::METADATA),* )]; fn final_key + TupleToEncodedIter>(key: KArg) -> Vec { let mut final_key = Vec::new(); @@ -210,9 +201,7 @@ pub trait TupleToEncodedIter { #[tuple_types_custom_trait_bound(Encode)] impl TupleToEncodedIter for Tuple { fn to_encoded_iter(&self) -> sp_std::vec::IntoIter> { - [for_tuples!( #(self.Tuple.encode()),* )] - .to_vec() - .into_iter() + [for_tuples!( #(self.Tuple.encode()),* )].to_vec().into_iter() } } @@ -246,7 +235,7 @@ impl ReversibleKeyGenerator for Tuple { fn decode_final_key(key_material: &[u8]) -> Result<(Self::Key, &[u8]), codec::Error> { let mut current_key_material = key_material; Ok(( - (for_tuples!{ + (for_tuples! { #({ let (key, material) = Tuple::decode_final_key(current_key_material)?; current_key_material = material; diff --git a/substrate/frame/support/src/storage/types/map.rs b/substrate/frame/support/src/storage/types/map.rs index 168d5236ccfbd9228d7f2f5dfcc476de7a44eb6f..00fa3a3b8b40ed3c2d260226dfa5528f97bba76a 100644 --- a/substrate/frame/support/src/storage/types/map.rs +++ b/substrate/frame/support/src/storage/types/map.rs @@ -18,14 +18,14 @@ //! Storage map type. 
Implements StorageMap, StorageIterableMap, StoragePrefixedMap traits and their //! methods directly. -use codec::{FullCodec, Decode, EncodeLike, Encode, MaxEncodedLen}; use crate::{ storage::{ - StorageAppend, StorageTryAppend, StorageDecodeLength, StoragePrefixedMap, - types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, + types::{OnEmptyGetter, OptionQuery, QueryKindTrait}, + StorageAppend, StorageDecodeLength, StoragePrefixedMap, StorageTryAppend, }, - traits::{GetDefault, StorageInstance, Get, StorageInfo}, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; use sp_arithmetic::traits::SaturatedConversion; use sp_std::prelude::*; @@ -35,8 +35,8 @@ use sp_std::prelude::*; /// Each value is stored at: /// ```nocompile /// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key)) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key)) /// ``` /// /// # Warning @@ -44,10 +44,14 @@ use sp_std::prelude::*; /// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as /// `blake2_128_concat` must be used. Otherwise, other values in storage can be compromised. pub struct StorageMap< - Prefix, Hasher, Key, Value, QueryKind=OptionQuery, OnEmpty=GetDefault, MaxValues=GetDefault, ->( - core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty, MaxValues)> -); + Prefix, + Hasher, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Hasher, Key, Value, QueryKind, OnEmpty, MaxValues)>); impl crate::storage::generator::StorageMap @@ -77,9 +81,8 @@ where } } -impl - StoragePrefixedMap for - StorageMap +impl StoragePrefixedMap + for StorageMap where Prefix: StorageInstance, Hasher: crate::hash::StorageHasher, @@ -148,7 +151,7 @@ where /// Mutate the value under a key. pub fn mutate, R, F: FnOnce(&mut QueryKind::Query) -> R>( key: KeyArg, - f: F + f: F, ) -> R { >::mutate(key, f) } @@ -165,7 +168,7 @@ where /// Mutate the value under a key. Deletes the item if mutated to a `None`. pub fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( key: KeyArg, - f: F + f: F, ) -> R { >::mutate_exists(key, f) } @@ -198,7 +201,7 @@ where EncodeLikeKey: EncodeLike, Item: Encode, EncodeLikeItem: EncodeLike, - Value: StorageAppend + Value: StorageAppend, { >::append(key, item) } @@ -216,7 +219,8 @@ where /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. pub fn decode_len>(key: KeyArg) -> Option - where Value: StorageDecodeLength, + where + Value: StorageDecodeLength, { >::decode_len(key) } @@ -225,7 +229,7 @@ where /// /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. pub fn migrate_key>( - key: KeyArg + key: KeyArg, ) -> Option { >::migrate_key::(key) } @@ -263,19 +267,14 @@ where /// Try and append the given item to the value in the storage. /// /// Is only available if `Value` of the storage implements [`StorageTryAppend`]. 
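The defaulted `QueryKind = OptionQuery` and `OnEmpty = GetDefault` parameters in the struct above control what a read returns for a missing key: `OptionQuery` yields `None`, while `ValueQuery` yields `OnEmpty::get()` directly. A sketch with an explicit fallback (names are illustrative; assumes externalities):

use frame_support::{
	parameter_types,
	storage::types::{StorageMap, ValueQuery},
	traits::StorageInstance,
	Blake2_128Concat,
};

struct ScorePrefix;
impl StorageInstance for ScorePrefix {
	fn pallet_prefix() -> &'static str { "Example" }
	const STORAGE_PREFIX: &'static str = "Score";
}

parameter_types! { pub const DefaultScore: u32 = 100; }

type Score = StorageMap<ScorePrefix, Blake2_128Concat, u64, u32, ValueQuery, DefaultScore>;

fn query_kind_sketch() {
	assert_eq!(Score::get(7), 100); // no entry yet: `OnEmpty` supplies the value
	Score::insert(7, 250);
	assert_eq!(Score::get(7), 250);
}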
- pub fn try_append( - key: KArg, - item: EncodeLikeItem, - ) -> Result<(), ()> + pub fn try_append(key: KArg, item: EncodeLikeItem) -> Result<(), ()> where KArg: EncodeLike + Clone, Item: Encode, EncodeLikeItem: EncodeLike, Value: StorageTryAppend, { - < - Self as crate::storage::TryAppendMap - >::try_append(key, item) + >::try_append(key, item) } } @@ -332,7 +331,8 @@ pub trait StorageMapMetadata { } impl StorageMapMetadata - for StorageMap where + for StorageMap +where Prefix: StorageInstance, Hasher: crate::hash::StorageHasher, Key: FullCodec, @@ -348,9 +348,8 @@ impl StorageMapMetada DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); } -impl - crate::traits::StorageInfoTrait for - StorageMap +impl crate::traits::StorageInfoTrait + for StorageMap where Prefix: StorageInstance, Hasher: crate::hash::StorageHasher, @@ -361,26 +360,24 @@ where MaxValues: Get>, { fn storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: Some( - Hasher::max_len::() - .saturating_add(Value::max_encoded_len()) - .saturated_into(), - ), - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Hasher::max_len::() + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] } } /// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. impl - crate::traits::PartialStorageInfoTrait for - StorageMap + crate::traits::PartialStorageInfoTrait + for StorageMap where Prefix: StorageInstance, Hasher: crate::hash::StorageHasher, @@ -391,29 +388,28 @@ where MaxValues: Get>, { fn partial_storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: None, - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] } } #[cfg(test)] mod test { use super::*; - use sp_io::{TestExternalities, hashing::twox_128}; - use crate::hash::*; - use crate::storage::types::ValueQuery; + use crate::{hash::*, storage::types::ValueQuery}; use frame_metadata::StorageEntryModifier; + use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; impl StorageInstance for Prefix { - fn pallet_prefix() -> &'static str { "test" } + fn pallet_prefix() -> &'static str { + "test" + } const STORAGE_PREFIX: &'static str = "foo"; } @@ -427,9 +423,8 @@ mod test { #[test] fn test() { type A = StorageMap; - type AValueQueryWithAnOnEmpty = StorageMap< - Prefix, Blake2_128Concat, u16, u32, ValueQuery, ADefault - >; + type AValueQueryWithAnOnEmpty = + StorageMap; type B = StorageMap; type C = StorageMap; type WithLen = StorageMap>; @@ -471,17 +466,20 @@ mod test { A::remove(2); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { - *v = *v * 2; Ok(()) + *v = *v * 2; + Ok(()) }); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(2, |v| { - *v = *v * 2; Ok(()) + *v = *v * 2; + Ok(()) }); assert_eq!(A::contains_key(2), true); assert_eq!(A::get(2), Some(97 * 4)); A::remove(2); let _: Result<(), ()> = 
AValueQueryWithAnOnEmpty::try_mutate(2, |v| { - *v = *v * 2; Err(()) + *v = *v * 2; + Err(()) }); assert_eq!(A::contains_key(2), false); @@ -519,7 +517,6 @@ mod test { assert_eq!(A::contains_key(2), true); assert_eq!(A::get(2), Some(100)); - A::insert(2, 10); assert_eq!(A::take(2), Some(10)); assert_eq!(A::contains_key(2), false); @@ -543,7 +540,7 @@ mod test { C::insert(3, 10); C::insert(4, 10); - A::translate_values::(|v| Some((v * 2).into())); + A::translate_values::(|v| Some((v * 2).into())); assert_eq!(A::iter().collect::>(), vec![(4, 20), (3, 20)]); A::insert(3, 10); @@ -554,7 +551,7 @@ mod test { C::insert(3, 10); C::insert(4, 10); - A::translate::(|k, v| Some((k * v as u16).into())); + A::translate::(|k, v| Some((k * v as u16).into())); assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); diff --git a/substrate/frame/support/src/storage/types/mod.rs b/substrate/frame/support/src/storage/types/mod.rs index f61065671315f63adf2941cb39477b4e710d9551..f800f33dc3162f927048fb9818c306bbbf5505d9 100644 --- a/substrate/frame/support/src/storage/types/mod.rs +++ b/substrate/frame/support/src/storage/types/mod.rs @@ -30,7 +30,7 @@ mod value; pub use double_map::{StorageDoubleMap, StorageDoubleMapMetadata}; pub use key::{ EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, Key, KeyGenerator, - ReversibleKeyGenerator, TupleToEncodedIter, KeyGeneratorMaxEncodedLen, + KeyGeneratorMaxEncodedLen, ReversibleKeyGenerator, TupleToEncodedIter, }; pub use map::{StorageMap, StorageMapMetadata}; pub use nmap::{StorageNMap, StorageNMapMetadata}; diff --git a/substrate/frame/support/src/storage/types/nmap.rs b/substrate/frame/support/src/storage/types/nmap.rs index 63c27729d2811a06a1ef6cd12ba021a2b03b3ddd..f62cd1435a2dff6aa71775ab27375c07414534ed 100755 --- a/substrate/frame/support/src/storage/types/nmap.rs +++ b/substrate/frame/support/src/storage/types/nmap.rs @@ -21,12 +21,12 @@ use crate::{ storage::{ types::{ - EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OnEmptyGetter, - OptionQuery, QueryKindTrait, TupleToEncodedIter, + EncodeLikeTuple, HasKeyPrefix, HasReversibleKeyPrefix, OnEmptyGetter, OptionQuery, + QueryKindTrait, TupleToEncodedIter, }, KeyGenerator, PrefixIterator, StorageAppend, StorageDecodeLength, StoragePrefixedMap, }, - traits::{Get, GetDefault, StorageInstance, StorageInfo}, + traits::{Get, GetDefault, StorageInfo, StorageInstance}, }; use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; @@ -39,9 +39,9 @@ use sp_std::prelude::*; /// Each value is stored at: /// ```nocompile /// Twox128(Prefix::pallet_prefix()) -/// ++ Twox128(Prefix::STORAGE_PREFIX) -/// ++ Hasher1(encode(key1)) -/// ++ Hasher2(encode(key2)) +/// ++ Twox128(Prefix::STORAGE_PREFIX) +/// ++ Hasher1(encode(key1)) +/// ++ Hasher2(encode(key2)) /// ++ ... /// ++ HasherN(encode(keyN)) /// ``` @@ -52,10 +52,13 @@ use sp_std::prelude::*; /// such as `blake2_128_concat` must be used for the key hashers. Otherwise, other values /// in storage can be compromised. 
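To make the `StorageNMap` declaration just below concrete, here is a minimal usage sketch. The `Prefix` instance mirrors the one in this file's test module, and each key gets its own hasher, as the warning above recommends when keys are user-controlled:

use frame_support::{
    hash::{Blake2_128Concat, Twox64Concat},
    storage::types::{Key, StorageNMap},
    traits::StorageInstance,
};
use sp_io::TestExternalities;

struct Prefix;
impl StorageInstance for Prefix {
    fn pallet_prefix() -> &'static str {
        "test"
    }
    const STORAGE_PREFIX: &'static str = "foo";
}

type Foo = StorageNMap<Prefix, (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u32>), u64>;

fn main() {
    TestExternalities::default().execute_with(|| {
        Foo::insert((1u16, 10u32), 100u64);
        assert_eq!(Foo::get((1u16, 10u32)), Some(100));
        assert_eq!(Foo::get((2u16, 20u32)), None);
    });
}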
pub struct StorageNMap< - Prefix, Key, Value, QueryKind = OptionQuery, OnEmpty = GetDefault, MaxValues=GetDefault, ->( - core::marker::PhantomData<(Prefix, Key, Value, QueryKind, OnEmpty, MaxValues)>, -); + Prefix, + Key, + Value, + QueryKind = OptionQuery, + OnEmpty = GetDefault, + MaxValues = GetDefault, +>(core::marker::PhantomData<(Prefix, Key, Value, QueryKind, OnEmpty, MaxValues)>); impl crate::storage::generator::StorageNMap @@ -83,8 +86,7 @@ where } } -impl - crate::storage::StoragePrefixedMap +impl crate::storage::StoragePrefixedMap for StorageNMap where Prefix: StorageInstance, @@ -113,7 +115,9 @@ where MaxValues: Get>, { /// Get the storage key used to fetch a value corresponding to a specific key. - pub fn hashed_key_for + TupleToEncodedIter>(key: KArg) -> Vec { + pub fn hashed_key_for + TupleToEncodedIter>( + key: KArg, + ) -> Vec { >::hashed_key_for(key) } @@ -123,7 +127,9 @@ where } /// Load the value associated with the given key from the map. - pub fn get + TupleToEncodedIter>(key: KArg) -> QueryKind::Query { + pub fn get + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { >::get(key) } @@ -137,7 +143,9 @@ where } /// Take a value from storage, removing it afterwards. - pub fn take + TupleToEncodedIter>(key: KArg) -> QueryKind::Query { + pub fn take + TupleToEncodedIter>( + key: KArg, + ) -> QueryKind::Query { >::take(key) } @@ -248,7 +256,9 @@ where /// /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. - pub fn decode_len + TupleToEncodedIter>(key: KArg) -> Option + pub fn decode_len + TupleToEncodedIter>( + key: KArg, + ) -> Option where Value: StorageDecodeLength, { @@ -260,7 +270,7 @@ where /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. pub fn migrate_keys(key: KArg, hash_fns: Key::HArg) -> Option where - KArg: EncodeLikeTuple + TupleToEncodedIter + KArg: EncodeLikeTuple + TupleToEncodedIter, { >::migrate_keys::<_>(key, hash_fns) } @@ -398,15 +408,13 @@ where { const MODIFIER: StorageEntryModifier = QueryKind::METADATA; const NAME: &'static str = Prefix::STORAGE_PREFIX; - const DEFAULT: DefaultByteGetter = DefaultByteGetter( - &OnEmptyGetter::(core::marker::PhantomData), - ); + const DEFAULT: DefaultByteGetter = + DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); const HASHERS: &'static [frame_metadata::StorageHasher] = Key::HASHER_METADATA; } -impl - crate::traits::StorageInfoTrait for - StorageNMap +impl crate::traits::StorageInfoTrait + for StorageNMap where Prefix: StorageInstance, Key: super::key::KeyGenerator + super::key::KeyGeneratorMaxEncodedLen, @@ -416,26 +424,23 @@ where MaxValues: Get>, { fn storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: Some( - Key::key_max_encoded_len() - .saturating_add(Value::max_encoded_len()) - .saturated_into(), - ), - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: Some( + Key::key_max_encoded_len() + .saturating_add(Value::max_encoded_len()) + .saturated_into(), + ), + }] } } /// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. 
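The `storage_info` impl above derives `max_size` from the worst-case hashed-key length plus the value's `MaxEncodedLen`; the `PartialStorageInfoTrait` variant below exists precisely for values that cannot provide that bound. A small sketch of the arithmetic for one concrete hasher/type pair, assuming the `max_len` helper on `StorageHasher` that this diff calls:

use codec::MaxEncodedLen;
use frame_support::hash::{Blake2_128Concat, StorageHasher};

fn main() {
    // Blake2_128Concat output = 16-byte hash followed by the SCALE-encoded key.
    assert_eq!(Blake2_128Concat::max_len::<u16>(), 16 + 2);
    // Worst-case entry size = hashed key part + value's maximum encoded length.
    let max_size = Blake2_128Concat::max_len::<u16>() + u64::max_encoded_len();
    assert_eq!(max_size, 26);
}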
-impl - crate::traits::PartialStorageInfoTrait for - StorageNMap +impl crate::traits::PartialStorageInfoTrait + for StorageNMap where Prefix: StorageInstance, Key: super::key::KeyGenerator, @@ -445,22 +450,22 @@ where MaxValues: Get>, { fn partial_storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::final_prefix().to_vec(), - max_values: MaxValues::get(), - max_size: None, - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::final_prefix().to_vec(), + max_values: MaxValues::get(), + max_size: None, + }] } } #[cfg(test)] mod test { use super::*; - use crate::hash::*; - use crate::storage::types::{Key, ValueQuery}; + use crate::{ + hash::*, + storage::types::{Key, ValueQuery}, + }; use frame_metadata::StorageEntryModifier; use sp_io::{hashing::twox_128, TestExternalities}; @@ -627,15 +632,9 @@ mod test { assert_eq!(A::iter().collect::>(), vec![(4, 40), (3, 30)]); assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!( - AValueQueryWithAnOnEmpty::MODIFIER, - StorageEntryModifier::Default - ); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); assert_eq!(A::NAME, "Foo"); - assert_eq!( - AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), - 98u32.encode() - ); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode()); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); WithLen::remove_all(None); @@ -787,41 +786,23 @@ mod test { C::insert((3, 30), 10); C::insert((4, 40), 10); A::translate_values::(|v| Some((v * 2).into())); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40), 20), ((3, 30), 20)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 20), ((3, 30), 20)]); A::insert((3, 30), 10); A::insert((4, 40), 10); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40), 10), ((3, 30), 10)] - ); - assert_eq!( - A::drain().collect::>(), - vec![((4, 40), 10), ((3, 30), 10)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); + assert_eq!(A::drain().collect::>(), vec![((4, 40), 10), ((3, 30), 10)]); assert_eq!(A::iter().collect::>(), vec![]); C::insert((3, 30), 10); C::insert((4, 40), 10); A::translate::(|(k1, k2), v| Some((k1 * k2 as u16 * v as u16).into())); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40), 1600), ((3, 30), 900)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40), 1600), ((3, 30), 900)]); assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!( - AValueQueryWithAnOnEmpty::MODIFIER, - StorageEntryModifier::Default - ); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); assert_eq!(A::NAME, "Foo"); - assert_eq!( - AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), - 98u32.encode() - ); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode()); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); WithLen::remove_all(None); @@ -833,14 +814,8 @@ mod test { A::insert((3, 31), 12); A::insert((4, 40), 13); A::insert((4, 41), 14); - assert_eq!( - A::iter_prefix_values((3,)).collect::>(), - vec![12, 11] - ); - assert_eq!( - A::iter_prefix_values((4,)).collect::>(), - vec![13, 14] - ); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![12, 11]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![13, 14]); }); } @@ -848,52 +823,32 @@ mod test { fn test_3_keys() { type A = 
StorageNMap< Prefix, - ( - Key, - Key, - Key, - ), + (Key, Key, Key), u32, OptionQuery, >; type AValueQueryWithAnOnEmpty = StorageNMap< Prefix, - ( - Key, - Key, - Key, - ), + (Key, Key, Key), u32, ValueQuery, ADefault, >; type B = StorageNMap< Prefix, - ( - Key, - Key, - Key, - ), + (Key, Key, Key), u32, ValueQuery, >; type C = StorageNMap< Prefix, - ( - Key, - Key, - Key, - ), + (Key, Key, Key), u8, ValueQuery, >; type WithLen = StorageNMap< Prefix, - ( - Key, - Key, - Key, - ), + (Key, Key, Key), Vec, >; @@ -916,11 +871,7 @@ mod test { assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30); A::swap::< - ( - Key, - Key, - Key, - ), + (Key, Key, Key), _, _, >((1, 10, 100), (2, 20, 200)); @@ -1020,17 +971,11 @@ mod test { C::insert((3, 30, 300), 10); C::insert((4, 40, 400), 10); A::translate_values::(|v| Some((v * 2).into())); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40, 400), 20), ((3, 30, 300), 20)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 20), ((3, 30, 300), 20)]); A::insert((3, 30, 300), 10); A::insert((4, 40, 400), 10); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40, 400), 10), ((3, 30, 300), 10)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 10), ((3, 30, 300), 10)]); assert_eq!( A::drain().collect::>(), vec![((4, 40, 400), 10), ((3, 30, 300), 10)] @@ -1042,21 +987,12 @@ mod test { A::translate::(|(k1, k2, k3), v| { Some((k1 * k2 as u16 * v as u16 / k3 as u16).into()) }); - assert_eq!( - A::iter().collect::>(), - vec![((4, 40, 400), 4), ((3, 30, 300), 3)] - ); + assert_eq!(A::iter().collect::>(), vec![((4, 40, 400), 4), ((3, 30, 300), 3)]); assert_eq!(A::MODIFIER, StorageEntryModifier::Optional); - assert_eq!( - AValueQueryWithAnOnEmpty::MODIFIER, - StorageEntryModifier::Default - ); + assert_eq!(AValueQueryWithAnOnEmpty::MODIFIER, StorageEntryModifier::Default); assert_eq!(A::NAME, "Foo"); - assert_eq!( - AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), - 98u32.encode() - ); + assert_eq!(AValueQueryWithAnOnEmpty::DEFAULT.0.default_byte(), 98u32.encode()); assert_eq!(A::DEFAULT.0.default_byte(), Option::::None.encode()); WithLen::remove_all(None); @@ -1068,22 +1004,10 @@ mod test { A::insert((3, 30, 301), 12); A::insert((4, 40, 400), 13); A::insert((4, 40, 401), 14); - assert_eq!( - A::iter_prefix_values((3,)).collect::>(), - vec![11, 12] - ); - assert_eq!( - A::iter_prefix_values((4,)).collect::>(), - vec![14, 13] - ); - assert_eq!( - A::iter_prefix_values((3, 30)).collect::>(), - vec![11, 12] - ); - assert_eq!( - A::iter_prefix_values((4, 40)).collect::>(), - vec![14, 13] - ); + assert_eq!(A::iter_prefix_values((3,)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4,)).collect::>(), vec![14, 13]); + assert_eq!(A::iter_prefix_values((3, 30)).collect::>(), vec![11, 12]); + assert_eq!(A::iter_prefix_values((4, 40)).collect::>(), vec![14, 13]); }); } } diff --git a/substrate/frame/support/src/storage/types/value.rs b/substrate/frame/support/src/storage/types/value.rs index 3fe7d436402411aa375e16128a8811fb3dfe2d06..ad835e928bdd6cb828e77f2169a6890d56f0b031 100644 --- a/substrate/frame/support/src/storage/types/value.rs +++ b/substrate/frame/support/src/storage/types/value.rs @@ -17,15 +17,15 @@ //! Storage value type. Implements StorageValue trait and its method directly. 
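Before the `StorageValue` hunks below, a minimal usage sketch of the type being reformatted. The `Prefix` impl mirrors this file's test module; `put` is assumed from the same API surface, although this diff only touches its sibling methods:

use frame_support::{storage::types::StorageValue, traits::StorageInstance};
use sp_io::TestExternalities;

struct Prefix;
impl StorageInstance for Prefix {
    fn pallet_prefix() -> &'static str {
        "test"
    }
    const STORAGE_PREFIX: &'static str = "foo";
}

type A = StorageValue<Prefix, u32>;

fn main() {
    TestExternalities::default().execute_with(|| {
        assert_eq!(A::get(), None); // `OptionQuery` is the default query kind
        A::put(7u32);
        assert_eq!(A::get(), Some(7));
        assert_eq!(A::take(), Some(7)); // `take` removes the value...
        assert!(!A::exists()); // ...so it no longer exists afterwards
    });
}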
-use codec::{FullCodec, Decode, EncodeLike, Encode, MaxEncodedLen}; use crate::{ storage::{ - StorageAppend, StorageTryAppend, StorageDecodeLength, - types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, - generator::{StorageValue as StorageValueT}, + generator::StorageValue as StorageValueT, + types::{OnEmptyGetter, OptionQuery, QueryKindTrait}, + StorageAppend, StorageDecodeLength, StorageTryAppend, }, - traits::{GetDefault, StorageInstance, StorageInfo}, + traits::{GetDefault, StorageInfo, StorageInstance}, }; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; use sp_arithmetic::traits::SaturatedConversion; use sp_std::prelude::*; @@ -36,12 +36,12 @@ use sp_std::prelude::*; /// ```nocompile /// Twox128(Prefix::pallet_prefix()) ++ Twox128(Prefix::STORAGE_PREFIX) /// ``` -pub struct StorageValue( - core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)> +pub struct StorageValue( + core::marker::PhantomData<(Prefix, Value, QueryKind, OnEmpty)>, ); -impl crate::storage::generator::StorageValue for - StorageValue +impl crate::storage::generator::StorageValue + for StorageValue where Prefix: StorageInstance, Value: FullCodec, @@ -71,13 +71,19 @@ where OnEmpty: crate::traits::Get + 'static, { /// Get the storage key. - pub fn hashed_key() -> [u8; 32] { >::hashed_key() } + pub fn hashed_key() -> [u8; 32] { + >::hashed_key() + } /// Does the value (explicitly) exist in storage? - pub fn exists() -> bool { >::exists() } + pub fn exists() -> bool { + >::exists() + } /// Load the value from the provided storage instance. - pub fn get() -> QueryKind::Query { >::get() } + pub fn get() -> QueryKind::Query { + >::get() + } /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, /// `Err` if not. @@ -120,7 +126,9 @@ where /// Store a value under this key into the provided storage instance. /// /// this uses the query type rather than the underlying value. - pub fn set(val: QueryKind::Query) { >::set(val) } + pub fn set(val: QueryKind::Query) { + >::set(val) + } /// Mutate the value pub fn mutate R>(f: F) -> R { @@ -135,10 +143,14 @@ where } /// Clear the storage value. - pub fn kill() { >::kill() } + pub fn kill() { + >::kill() + } /// Take a value from storage, removing it afterwards. - pub fn take() -> QueryKind::Query { >::take() } + pub fn take() -> QueryKind::Query { + >::take() + } /// Append the given item to the value in the storage. /// @@ -153,7 +165,7 @@ where where Item: Encode, EncodeLikeItem: EncodeLike, - Value: StorageAppend + Value: StorageAppend, { >::append(item) } @@ -169,7 +181,10 @@ where /// /// `None` does not mean that `get()` does not return a value. The default value is completly /// ignored by this function. 
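The `decode_len` doc above relies on SCALE's layout: a `Vec` is stored as a compact-encoded length followed by the items, so the length can be read without decoding the rest. A plain-`codec` sketch of why that works:

use codec::{Compact, Decode, Encode};

fn main() {
    let stored = vec![1u32, 2, 3].encode();
    // Only the compact length prefix is decoded, never the three `u32` items.
    let len = Compact::<u32>::decode(&mut &stored[..]).unwrap().0;
    assert_eq!(len, 3);
}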
- pub fn decode_len() -> Option where Value: StorageDecodeLength { + pub fn decode_len() -> Option + where + Value: StorageDecodeLength, + { >::decode_len() } @@ -194,7 +209,8 @@ pub trait StorageValueMetadata { } impl StorageValueMetadata - for StorageValue where + for StorageValue +where Prefix: StorageInstance, Value: FullCodec, QueryKind: QueryKindTrait, @@ -206,64 +222,57 @@ impl StorageValueMetadata DefaultByteGetter(&OnEmptyGetter::(core::marker::PhantomData)); } -impl - crate::traits::StorageInfoTrait for - StorageValue +impl crate::traits::StorageInfoTrait + for StorageValue where Prefix: StorageInstance, Value: FullCodec + MaxEncodedLen, QueryKind: QueryKindTrait, - OnEmpty: crate::traits::Get + 'static + OnEmpty: crate::traits::Get + 'static, { fn storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::hashed_key().to_vec(), - max_values: Some(1), - max_size: Some( - Value::max_encoded_len() - .saturated_into(), - ), - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::hashed_key().to_vec(), + max_values: Some(1), + max_size: Some(Value::max_encoded_len().saturated_into()), + }] } } /// It doesn't require to implement `MaxEncodedLen` and give no information for `max_size`. -impl - crate::traits::PartialStorageInfoTrait for - StorageValue +impl crate::traits::PartialStorageInfoTrait + for StorageValue where Prefix: StorageInstance, Value: FullCodec, QueryKind: QueryKindTrait, - OnEmpty: crate::traits::Get + 'static + OnEmpty: crate::traits::Get + 'static, { fn partial_storage_info() -> Vec { - vec![ - StorageInfo { - pallet_name: Self::module_prefix().to_vec(), - storage_name: Self::storage_prefix().to_vec(), - prefix: Self::hashed_key().to_vec(), - max_values: Some(1), - max_size: None, - } - ] + vec![StorageInfo { + pallet_name: Self::module_prefix().to_vec(), + storage_name: Self::storage_prefix().to_vec(), + prefix: Self::hashed_key().to_vec(), + max_values: Some(1), + max_size: None, + }] } } #[cfg(test)] mod test { use super::*; - use sp_io::{TestExternalities, hashing::twox_128}; use crate::storage::types::ValueQuery; use frame_metadata::StorageEntryModifier; + use sp_io::{hashing::twox_128, TestExternalities}; struct Prefix; impl StorageInstance for Prefix { - fn pallet_prefix() -> &'static str { "test" } + fn pallet_prefix() -> &'static str { + "test" + } const STORAGE_PREFIX: &'static str = "foo"; } @@ -309,10 +318,16 @@ mod test { assert_eq!(A::try_get(), Ok(4)); A::set(Some(4)); - let _: Result<(), ()> = A::try_mutate(|v| { *v = Some(v.unwrap() * 2); Ok(()) }); + let _: Result<(), ()> = A::try_mutate(|v| { + *v = Some(v.unwrap() * 2); + Ok(()) + }); assert_eq!(A::try_get(), Ok(8)); - let _: Result<(), ()> = A::try_mutate(|v| { *v = Some(v.unwrap() * 2); Err(()) }); + let _: Result<(), ()> = A::try_mutate(|v| { + *v = Some(v.unwrap() * 2); + Err(()) + }); assert_eq!(A::try_get(), Ok(8)); A::kill(); @@ -321,7 +336,8 @@ mod test { AValueQueryWithAnOnEmpty::kill(); let _: Result<(), ()> = AValueQueryWithAnOnEmpty::try_mutate(|v| { - *v = *v * 2; Ok(()) + *v = *v * 2; + Ok(()) }); assert_eq!(AValueQueryWithAnOnEmpty::try_get(), Ok(97 * 2)); diff --git a/substrate/frame/support/src/storage/unhashed.rs b/substrate/frame/support/src/storage/unhashed.rs index 134b3debcd31b5fc17da626d0eced8935e3dd989..f700771b2d5cc990df4031d99dce8640f9d4b1cb 100644 --- 
a/substrate/frame/support/src/storage/unhashed.rs +++ b/substrate/frame/support/src/storage/unhashed.rs @@ -17,8 +17,8 @@ //! Operation on unhashed runtime storage. +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { diff --git a/substrate/frame/support/src/storage/weak_bounded_vec.rs b/substrate/frame/support/src/storage/weak_bounded_vec.rs index a98d2182d0919c30f1328d6670f231d613697336..9fa360230691da1aa4d982f7321475f41b63ccc6 100644 --- a/substrate/frame/support/src/storage/weak_bounded_vec.rs +++ b/substrate/frame/support/src/storage/weak_bounded_vec.rs @@ -18,17 +18,16 @@ //! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map //! or a double map. -use sp_std::prelude::*; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData}; -use codec::{Encode, Decode, MaxEncodedLen}; +use crate::{ + storage::{StorageDecodeLength, StorageTryAppend}, + traits::Get, +}; +use codec::{Decode, Encode, MaxEncodedLen}; use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use crate::{ - traits::Get, - storage::{StorageDecodeLength, StorageTryAppend}, -}; +use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; /// A weakly bounded vector. /// @@ -317,9 +316,9 @@ where #[cfg(test)] pub mod test { use super::*; + use crate::Twox128; use sp_io::TestExternalities; use sp_std::convert::TryInto; - use crate::Twox128; crate::parameter_types! { pub const Seven: u32 = 7; diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index ec47331285ef8ff532e22f00412d943dcbad826b..fcc3305c409c1c87e3b42e0125db6e82c7dd43c9 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -20,62 +20,67 @@ //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. 
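For the `unhashed` module touched above, a short demonstration of the get/put round trip, run inside `sp_io::TestExternalities` as the test modules in this diff do:

use frame_support::storage::unhashed;
use sp_io::TestExternalities;

fn main() {
    TestExternalities::default().execute_with(|| {
        unhashed::put(b"some key", &42u32);
        assert_eq!(unhashed::get::<u32>(b"some key"), Some(42));
        // No hashing is applied: a different raw key is simply a different entry.
        assert_eq!(unhashed::get::<u32>(b"other key"), None);
    });
}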
pub mod tokens; -pub use tokens::fungible; -pub use tokens::fungibles; -pub use tokens::currency::{ - Currency, LockIdentifier, LockableCurrency, ReservableCurrency, NamedReservableCurrency, - VestingSchedule, +pub use tokens::{ + currency::{ + Currency, LockIdentifier, LockableCurrency, NamedReservableCurrency, ReservableCurrency, + VestingSchedule, + }, + fungible, fungibles, + imbalance::{Imbalance, OnUnbalanced, SignedImbalance}, + BalanceStatus, ExistenceRequirement, WithdrawReasons, }; -pub use tokens::imbalance::{Imbalance, OnUnbalanced, SignedImbalance}; -pub use tokens::{ExistenceRequirement, WithdrawReasons, BalanceStatus}; mod members; pub use members::{ - Contains, ContainsLengthBound, SortedMembers, InitializeMembers, ChangeMembers, All, IsInVec, - AsContains, + All, AsContains, ChangeMembers, Contains, ContainsLengthBound, InitializeMembers, IsInVec, + SortedMembers, }; mod validation; pub use validation::{ - ValidatorSet, ValidatorSetWithIdentification, OneSessionHandler, FindAuthor, VerifySeal, - EstimateNextNewSession, EstimateNextSessionRotation, KeyOwnerProofSystem, ValidatorRegistration, - Lateness, + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, KeyOwnerProofSystem, Lateness, + OneSessionHandler, ValidatorRegistration, ValidatorSet, ValidatorSetWithIdentification, + VerifySeal, }; mod filter; pub use filter::{ - Filter, FilterStack, FilterStackGuard, ClearFilterGuard, InstanceFilter, IntegrityTest, - AllowAll, DenyAll, + AllowAll, ClearFilterGuard, DenyAll, Filter, FilterStack, FilterStackGuard, InstanceFilter, + IntegrityTest, }; mod misc; pub use misc::{ - Len, Get, GetDefault, HandleLifetime, TryDrop, Time, UnixTime, IsType, IsSubType, ExecuteBlock, - SameOrOther, OnNewAccount, OnKilledAccount, OffchainWorker, GetBacking, Backing, ExtrinsicCall, - EnsureInherentsAreFirst, ConstU32, + Backing, ConstU32, EnsureInherentsAreFirst, ExecuteBlock, ExtrinsicCall, Get, GetBacking, + GetDefault, HandleLifetime, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, + OnNewAccount, SameOrOther, Time, TryDrop, UnixTime, }; mod stored_map; -pub use stored_map::{StoredMap, StorageMapShim}; +pub use stored_map::{StorageMapShim, StoredMap}; mod randomness; pub use randomness::Randomness; mod metadata; pub use metadata::{ - CallMetadata, GetCallMetadata, GetCallName, PalletInfo, PalletVersion, GetPalletVersion, - PALLET_VERSION_STORAGE_KEY_POSTFIX, PalletInfoAccess, + CallMetadata, GetCallMetadata, GetCallName, GetPalletVersion, PalletInfo, PalletInfoAccess, + PalletVersion, PALLET_VERSION_STORAGE_KEY_POSTFIX, }; mod hooks; -pub use hooks::{Hooks, OnGenesis, OnInitialize, OnFinalize, OnIdle, OnRuntimeUpgrade, OnTimestampSet}; -#[cfg(feature = "try-runtime")] -pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; #[cfg(feature = "std")] pub use hooks::GenesisBuild; +pub use hooks::{ + Hooks, OnFinalize, OnGenesis, OnIdle, OnInitialize, OnRuntimeUpgrade, OnTimestampSet, +}; +#[cfg(feature = "try-runtime")] +pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; pub mod schedule; mod storage; -pub use storage::{Instance, PartialStorageInfoTrait, StorageInstance, StorageInfo, StorageInfoTrait}; +pub use storage::{ + Instance, PartialStorageInfoTrait, StorageInfo, StorageInfoTrait, StorageInstance, +}; mod dispatch; pub use dispatch::{EnsureOrigin, OriginTrait, UnfilteredDispatchable}; diff --git a/substrate/frame/support/src/traits/filter.rs b/substrate/frame/support/src/traits/filter.rs index 
4b70fa177e5ca450d9595b4eb56f2bfd0f4457d8..b9f5037abc66ef5e1d4cb83a059b5204e6313699 100644 --- a/substrate/frame/support/src/traits/filter.rs +++ b/substrate/frame/support/src/traits/filter.rs @@ -32,11 +32,15 @@ pub enum AllowAll {} pub enum DenyAll {} impl Filter for AllowAll { - fn filter(_: &T) -> bool { true } + fn filter(_: &T) -> bool { + true + } } impl Filter for DenyAll { - fn filter(_: &T) -> bool { false } + fn filter(_: &T) -> bool { + false + } } /// Trait to add a constraint onto the filter. @@ -101,17 +105,28 @@ pub trait InstanceFilter: Sized + Send + Sync { fn filter(&self, _: &T) -> bool; /// Determines whether `self` matches at least everything that `_o` does. - fn is_superset(&self, _o: &Self) -> bool { false } + fn is_superset(&self, _o: &Self) -> bool { + false + } } impl InstanceFilter for () { - fn filter(&self, _: &T) -> bool { true } - fn is_superset(&self, _o: &Self) -> bool { true } + fn filter(&self, _: &T) -> bool { + true + } + fn is_superset(&self, _o: &Self) -> bool { + true + } } /// Re-exported for the macro. #[doc(hidden)] -pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; +pub use sp_std::{ + boxed::Box, + cell::RefCell, + mem::{swap, take}, + vec::Vec, +}; #[macro_export] macro_rules! impl_filter_stack { @@ -206,7 +221,9 @@ pub mod test_impl_filter_stack { pub struct IsCallable; pub struct BaseFilter; impl Filter for BaseFilter { - fn filter(x: &u32) -> bool { x % 2 == 0 } + fn filter(x: &u32) -> bool { + x % 2 == 0 + } } impl_filter_stack!( crate::traits::filter::test_impl_filter_stack::IsCallable, diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index 5f7b35a9ad25c3a7325cd42ba4b28675b646431d..37b07c3113018ca1a9fee7848a3f0b209fa1cb1a 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -17,9 +17,9 @@ //! Traits for hooking tasks to events in a blockchain's lifecycle. +use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::Saturating; use sp_runtime::traits::MaybeSerializeDeserialize; -use impl_trait_for_tuples::impl_for_tuples; /// The block initialization trait. /// @@ -33,7 +33,9 @@ pub trait OnInitialize { /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, /// including inherent extrinsics. Hence for instance, if your runtime includes /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + 0 + } } #[impl_for_tuples(30)] @@ -71,7 +73,7 @@ pub trait OnIdle { /// in a block are applied but before `on_finalize` is executed. fn on_idle( _n: BlockNumber, - _remaining_weight: crate::weights::Weight + _remaining_weight: crate::weights::Weight, ) -> crate::weights::Weight { 0 } @@ -79,7 +81,7 @@ pub trait OnIdle { #[impl_for_tuples(30)] impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { + fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { let mut weight = 0; for_tuples!( #( let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); @@ -170,13 +172,17 @@ pub trait OnRuntimeUpgrade { /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools.
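The `Filter` impls reformatted above are all one-liners. As a standalone sketch of the same contract, mirroring the even-number `BaseFilter` from the test module in this hunk:

use frame_support::traits::Filter;

struct EvenOnly;
impl Filter<u32> for EvenOnly {
    fn filter(x: &u32) -> bool {
        x % 2 == 0
    }
}

fn main() {
    assert!(EvenOnly::filter(&4));
    assert!(!EvenOnly::filter(&3));
}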
#[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { Ok(()) } + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } /// Execute some post-checks after a runtime upgrade. /// /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { Ok(()) } + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } } #[impl_for_tuples(30)] @@ -214,7 +220,7 @@ pub trait Hooks { /// and pass the result to the next `on_idle` hook if it exists. fn on_idle( _n: BlockNumber, - _remaining_weight: crate::weights::Weight + _remaining_weight: crate::weights::Weight, ) -> crate::weights::Weight { 0 } @@ -222,7 +228,9 @@ pub trait Hooks { /// The block is being initialized. Implement to have something happen. /// /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + 0 + } /// Perform a module upgrade. /// @@ -238,7 +246,9 @@ pub trait Hooks { /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 + } /// Execute some pre-checks prior to a runtime upgrade. /// @@ -282,7 +292,7 @@ pub trait Hooks { /// A trait to define the build function of a genesis config, T and I are placeholder for pallet /// trait and pallet instance. #[cfg(feature = "std")] -pub trait GenesisBuild: Default + MaybeSerializeDeserialize { +pub trait GenesisBuild: Default + MaybeSerializeDeserialize { /// The build function is called within an externalities allowing storage APIs. /// Thus one can write to storage using regular pallet storages. fn build(&self); diff --git a/substrate/frame/support/src/traits/members.rs b/substrate/frame/support/src/traits/members.rs index 8b9c2c90f541dd7bd674abd2abfd1a72b6660a39..dbfc2e0120e4ec049d779d0fb815a17ee292f21e 100644 --- a/substrate/frame/support/src/traits/members.rs +++ b/substrate/frame/support/src/traits/members.rs @@ -17,7 +17,7 @@ //! Traits for dealing with the idea of membership. -use sp_std::{prelude::*, marker::PhantomData}; +use sp_std::{marker::PhantomData, prelude::*}; /// A trait for querying whether a type can be said to "contain" a value. pub trait Contains { @@ -28,7 +28,9 @@ pub trait Contains { /// A `Contains` implementation which always returns `true`. pub struct All(PhantomData); impl Contains for All { - fn contains(_: &T) -> bool { true } + fn contains(_: &T) -> bool { + true + } } #[impl_trait_for_tuples::impl_for_tuples(30)] @@ -77,32 +79,46 @@ pub trait SortedMembers { fn sorted_members() -> Vec; /// Return `true` if this "contains" the given value `t`. - fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } + fn contains(t: &T) -> bool { + Self::sorted_members().binary_search(t).is_ok() + } /// Get the number of items in the set. - fn count() -> usize { Self::sorted_members().len() } + fn count() -> usize { + Self::sorted_members().len() + } /// Add an item that would satisfy `contains`. It does not make sure any other /// state is correctly maintained or generated. 
/// /// **Should be used for benchmarking only!!!** #[cfg(feature = "runtime-benchmarks")] - fn add(_t: &T) { unimplemented!() } + fn add(_t: &T) { + unimplemented!() + } } /// Adapter struct for turning an `OrderedMembership` impl into a `Contains` impl. pub struct AsContains(PhantomData<(OM,)>); impl> Contains for AsContains { - fn contains(t: &T) -> bool { OM::contains(t) } + fn contains(t: &T) -> bool { + OM::contains(t) + } } /// Trivial utility for implementing `Contains`/`OrderedMembership` with a `Vec`. pub struct IsInVec(PhantomData); impl>> Contains for IsInVec { - fn contains(t: &X) -> bool { T::get().contains(t) } + fn contains(t: &X) -> bool { + T::get().contains(t) + } } impl>> SortedMembers for IsInVec { - fn sorted_members() -> Vec { let mut r = T::get(); r.sort(); r } + fn sorted_members() -> Vec { + let mut r = T::get(); + r.sort(); + r + } } /// A trait for querying bound for the length of an implementation of `Contains` @@ -174,19 +190,19 @@ pub trait ChangeMembers { (Some(old), Some(new)) if old == new => { old_i = old_iter.next(); new_i = new_iter.next(); - } + }, (Some(old), Some(new)) if old < new => { outgoing.push(old.clone()); old_i = old_iter.next(); - } + }, (Some(old), None) => { outgoing.push(old.clone()); old_i = old_iter.next(); - } + }, (_, Some(new)) => { incoming.push(new.clone()); new_i = new_iter.next(); - } + }, } } (incoming, outgoing) diff --git a/substrate/frame/support/src/traits/metadata.rs b/substrate/frame/support/src/traits/metadata.rs index b13a0464b30c004940f6b741264c0eda4ba4aced..ba263056384481d0561fd489f5a3afa78f5fc023 100644 --- a/substrate/frame/support/src/traits/metadata.rs +++ b/substrate/frame/support/src/traits/metadata.rs @@ -17,7 +17,7 @@ //! Traits for managing information attached to pallets and their constituents. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_runtime::RuntimeDebug; /// Provides information about the pallet setup in the runtime. @@ -91,11 +91,7 @@ pub struct PalletVersion { impl PalletVersion { /// Creates a new instance of `Self`. pub fn new(major: u16, minor: u8, patch: u8) -> Self { - Self { - major, - minor, - patch, - } + Self { major, minor, patch } } /// Returns the storage key for a pallet version. @@ -139,13 +135,10 @@ impl PalletVersion { impl sp_std::cmp::PartialOrd for PalletVersion { fn partial_cmp(&self, other: &Self) -> Option { - let res = self.major + let res = self + .major .cmp(&other.major) - .then_with(|| - self.minor - .cmp(&other.minor) - .then_with(|| self.patch.cmp(&other.patch) - )); + .then_with(|| self.minor.cmp(&other.minor).then_with(|| self.patch.cmp(&other.patch))); Some(res) } diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 9cab2626cd6cbc2f83cabb0b156dc53ca0c14112..d6eb8331cdb5d67f23858da2e46ef2fc11dca257 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -17,9 +17,9 @@ //! Smaller traits used in FRAME which don't need their own file. -use sp_runtime::{traits::Block as BlockT, DispatchError}; -use sp_arithmetic::traits::AtLeast32Bit; use crate::dispatch::Parameter; +use sp_arithmetic::traits::AtLeast32Bit; +use sp_runtime::{traits::Block as BlockT, DispatchError}; /// Anything that can have a `::len()` method. 
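The `ChangeMembers` helper whose match arms gained trailing commas above walks two sorted member lists with a two-pointer merge to split them into `(incoming, outgoing)`. The same algorithm, extracted into a standalone function:

fn diff_sorted(old: &[u32], new: &[u32]) -> (Vec<u32>, Vec<u32>) {
    let (mut incoming, mut outgoing) = (Vec::new(), Vec::new());
    let (mut old_iter, mut new_iter) = (old.iter(), new.iter());
    let (mut old_i, mut new_i) = (old_iter.next(), new_iter.next());
    loop {
        match (old_i, new_i) {
            (None, None) => break,
            (Some(old), Some(new)) if old == new => {
                // Present in both lists: neither incoming nor outgoing.
                old_i = old_iter.next();
                new_i = new_iter.next();
            },
            (Some(old), Some(new)) if old < new => {
                outgoing.push(*old); // in `old` only
                old_i = old_iter.next();
            },
            (Some(old), None) => {
                outgoing.push(*old); // `new` is exhausted
                old_i = old_iter.next();
            },
            (_, Some(new)) => {
                incoming.push(*new); // in `new` only
                new_i = new_iter.next();
            },
        }
    }
    (incoming, outgoing)
}

fn main() {
    assert_eq!(diff_sorted(&[1, 2, 4], &[2, 3, 4]), (vec![3], vec![1]));
}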
pub trait Len { @@ -27,7 +27,10 @@ pub trait Len { fn len(&self) -> usize; } -impl Len for T where ::IntoIter: ExactSizeIterator { +impl Len for T +where + ::IntoIter: ExactSizeIterator, +{ fn len(&self) -> usize { self.clone().into_iter().len() } @@ -42,7 +45,9 @@ pub trait Get { } impl Get for () { - fn get() -> T { T::default() } + fn get() -> T { + T::default() + } } /// Implement Get by returning Default for any type that implements Default. @@ -123,7 +128,10 @@ impl SameOrOther { } } - pub fn same(self) -> Result where A: Default { + pub fn same(self) -> Result + where + A: Default, + { match self { SameOrOther::Same(a) => Ok(a), SameOrOther::None => Ok(A::default()), @@ -131,7 +139,10 @@ impl SameOrOther { } } - pub fn other(self) -> Result where B: Default { + pub fn other(self) -> Result + where + B: Default, + { match self { SameOrOther::Same(a) => Err(a), SameOrOther::None => Ok(B::default()), @@ -157,10 +168,14 @@ pub trait OnKilledAccount { /// A simple, generic one-parameter event notifier/handler. pub trait HandleLifetime { /// An account was created. - fn created(_t: &T) -> Result<(), DispatchError> { Ok(()) } + fn created(_t: &T) -> Result<(), DispatchError> { + Ok(()) + } /// An account was killed. - fn killed(_t: &T) -> Result<(), DispatchError> { Ok(()) } + fn killed(_t: &T) -> Result<(), DispatchError> { + Ok(()) + } } impl HandleLifetime for () {} @@ -195,10 +210,18 @@ pub trait IsType: Into + From { } impl IsType for T { - fn from_ref(t: &T) -> &Self { t } - fn into_ref(&self) -> &T { self } - fn from_mut(t: &mut T) -> &mut Self { t } - fn into_mut(&mut self) -> &mut T { self } + fn from_ref(t: &T) -> &Self { + t + } + fn into_ref(&self) -> &T { + self + } + fn from_mut(t: &mut T) -> &mut Self { + t + } + fn into_mut(&mut self) -> &mut T { + self + } } /// Something that can be checked to be a of sub type `T`. @@ -300,8 +323,6 @@ pub trait GetBacking { fn get_backing(&self) -> Option; } - - /// A trait to ensure the inherent are before non-inherent in a block. /// /// This is typically implemented on runtime, through `construct_runtime!`. @@ -319,7 +340,8 @@ pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { } #[cfg(feature = "std")] -impl ExtrinsicCall for sp_runtime::testing::TestXt where +impl ExtrinsicCall for sp_runtime::testing::TestXt +where Call: codec::Codec + Sync + Send, { fn call(&self) -> &Self::Call { @@ -328,7 +350,7 @@ impl ExtrinsicCall for sp_runtime::testing::TestXt whe } impl ExtrinsicCall -for sp_runtime::generic::UncheckedExtrinsic + for sp_runtime::generic::UncheckedExtrinsic where Extra: sp_runtime::traits::SignedExtension, { diff --git a/substrate/frame/support/src/traits/schedule.rs b/substrate/frame/support/src/traits/schedule.rs index 58e4c419f2813b04c9471f82c2f31ad01daa4eda..10a973a993df825935af50e67830677d94d7c4ea 100644 --- a/substrate/frame/support/src/traits/schedule.rs +++ b/substrate/frame/support/src/traits/schedule.rs @@ -17,9 +17,9 @@ //! Traits and associated utilities for scheduling dispatchables in FRAME. -use sp_std::{prelude::*, fmt::Debug}; -use codec::{Encode, Decode, Codec, EncodeLike}; -use sp_runtime::{RuntimeDebug, DispatchError}; +use codec::{Codec, Decode, Encode, EncodeLike}; +use sp_runtime::{DispatchError, RuntimeDebug}; +use sp_std::{fmt::Debug, prelude::*}; /// Information relating to the period of a scheduled task. 
First item is the length of the /// period and the second is the number of times it should be executed in total before the task @@ -61,7 +61,7 @@ pub trait Anon { maybe_periodic: Option>, priority: Priority, origin: Origin, - call: Call + call: Call, ) -> Result; /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, @@ -107,7 +107,7 @@ pub trait Named { maybe_periodic: Option>, priority: Priority, origin: Origin, - call: Call + call: Call, ) -> Result; /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances diff --git a/substrate/frame/support/src/traits/stored_map.rs b/substrate/frame/support/src/traits/stored_map.rs index 0e1660df546f43942d938e8eed843a2b60bab99b..715a5211be430082b05581010fa42eea24892a1a 100644 --- a/substrate/frame/support/src/traits/stored_map.rs +++ b/substrate/frame/support/src/traits/stored_map.rs @@ -17,10 +17,9 @@ //! Traits and associated datatypes for managing abstract stored values. +use crate::{storage::StorageMap, traits::misc::HandleLifetime}; use codec::FullCodec; use sp_runtime::DispatchError; -use crate::storage::StorageMap; -use crate::traits::misc::HandleLifetime; /// An abstraction of a value stored within storage, but possibly as part of a larger composite /// item. @@ -47,25 +46,26 @@ pub trait StoredMap { let r = f(&mut account); *x = Some(account); r - } + }, }) } /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. /// /// This is infallible as long as the value does not get destroyed. - fn mutate_exists( - k: &K, - f: impl FnOnce(&mut Option) -> R, - ) -> Result { + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) } /// Set the item to something new. - fn insert(k: &K, t: T) -> Result<(), DispatchError> { Self::mutate(k, |i| *i = t) } + fn insert(k: &K, t: T) -> Result<(), DispatchError> { + Self::mutate(k, |i| *i = t) + } /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K) -> Result<(), DispatchError> { Self::mutate_exists(k, |x| *x = None) } + fn remove(k: &K) -> Result<(), DispatchError> { + Self::mutate_exists(k, |x| *x = None) + } } /// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this @@ -81,12 +81,15 @@ pub trait StoredMap { /// system module's `CallOnCreatedAccount` and `CallKillAccount`. pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); impl< - S: StorageMap, - L: HandleLifetime, - K: FullCodec, - T: FullCodec + Default, -> StoredMap for StorageMapShim { - fn get(k: &K) -> T { S::get(k) } + S: StorageMap, + L: HandleLifetime, + K: FullCodec, + T: FullCodec + Default, + > StoredMap for StorageMapShim +{ + fn get(k: &K) -> T { + S::get(k) + } fn insert(k: &K, t: T) -> Result<(), DispatchError> { if !S::contains_key(&k) { L::created(k)?; diff --git a/substrate/frame/support/src/traits/tokens.rs b/substrate/frame/support/src/traits/tokens.rs index faf8ebfd306cea7bdbd33bd29a364bad0696af8b..aca62bcad65c74cc4bf39843d7fb00f53b34a2c7 100644 --- a/substrate/frame/support/src/traits/tokens.rs +++ b/substrate/frame/support/src/traits/tokens.rs @@ -17,15 +17,15 @@ //! Traits for working with tokens and their associated datastructures. 
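The `Period` doc earlier in the schedule hunk (cut off by the hunk boundary) pairs an interval length with a total execution count. Assuming the usual scheduler semantics of one run per elapsed period, the blocks at which a periodic task fires are simple arithmetic:

fn executions(first_run: u32, period: (u32, u32)) -> Vec<u32> {
    // period.0 = interval length, period.1 = total number of executions
    (0..period.1).map(|i| first_run + i * period.0).collect()
}

fn main() {
    // Every 10 blocks, 3 executions in total, starting at block 100.
    assert_eq!(executions(100, (10, 3)), vec![100, 110, 120]);
}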
+pub mod currency; pub mod fungible; pub mod fungibles; -pub mod currency; pub mod imbalance; +mod misc; pub mod nonfungible; pub mod nonfungibles; -mod misc; +pub use imbalance::Imbalance; pub use misc::{ - BalanceConversion, BalanceStatus, DepositConsequence, - ExistenceRequirement, WithdrawConsequence, WithdrawReasons, + BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, + WithdrawConsequence, WithdrawReasons, }; -pub use imbalance::Imbalance; diff --git a/substrate/frame/support/src/traits/tokens/currency.rs b/substrate/frame/support/src/traits/tokens/currency.rs index 7882d04c035bf0df50f91c104382f5e3de2db76f..6c73a1527b481e9cf3044e1938e4df58aa329924 100644 --- a/substrate/frame/support/src/traits/tokens/currency.rs +++ b/substrate/frame/support/src/traits/tokens/currency.rs @@ -17,17 +17,19 @@ //! The Currency trait and associated types. -use sp_std::fmt::Debug; -use sp_runtime::traits::MaybeSerializeDeserialize; -use crate::dispatch::{DispatchResult, DispatchError}; -use super::misc::{Balance, WithdrawReasons, ExistenceRequirement}; -use super::imbalance::{Imbalance, SignedImbalance}; +use super::{ + imbalance::{Imbalance, SignedImbalance}, + misc::{Balance, ExistenceRequirement, WithdrawReasons}, +}; +use crate::dispatch::{DispatchError, DispatchResult}; use codec::MaxEncodedLen; +use sp_runtime::traits::MaybeSerializeDeserialize; +use sp_std::fmt::Debug; mod reservable; -pub use reservable::{ReservableCurrency, NamedReservableCurrency}; +pub use reservable::{NamedReservableCurrency, ReservableCurrency}; mod lockable; -pub use lockable::{LockableCurrency, VestingSchedule, LockIdentifier}; +pub use lockable::{LockIdentifier, LockableCurrency, VestingSchedule}; /// Abstraction over a fungible assets system. pub trait Currency { @@ -36,11 +38,11 @@ pub trait Currency { /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; + type PositiveImbalance: Imbalance; /// The opaque token type for an imbalance. This is returned by unbalanced operations /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; + type NegativeImbalance: Imbalance; // PUBLIC IMMUTABLES @@ -123,17 +125,14 @@ pub trait Currency { /// /// As much funds up to `value` will be deducted as possible. If this is less than `value`, /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); + fn slash(who: &AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance); /// Mints `value` to the free balance of `who`. /// /// If `who` doesn't exist, nothing is done and an Err returned. fn deposit_into_existing( who: &AccountId, - value: Self::Balance + value: Self::Balance, ) -> Result; /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on @@ -152,17 +151,11 @@ pub trait Currency { /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. /// /// Infallible. - fn deposit_creating( - who: &AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance; + fn deposit_creating(who: &AccountId, value: Self::Balance) -> Self::PositiveImbalance; /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on /// success. 
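The `slash` doc above promises best-effort deduction, with any shortfall reported as the second return item. Over plain integers the contract looks like this; the real method returns a `NegativeImbalance` rather than a number:

fn slash(free: u64, value: u64) -> (u64, u64) {
    let slashed = free.min(value);
    (slashed, value - slashed) // (amount actually deducted, uncovered remainder)
}

fn main() {
    assert_eq!(slash(100, 30), (30, 0)); // fully covered
    assert_eq!(slash(20, 30), (20, 10)); // only 20 available, 10 left over
}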
- fn resolve_creating( - who: &AccountId, - value: Self::NegativeImbalance, - ) { + fn resolve_creating(who: &AccountId, value: Self::NegativeImbalance) { let v = value.peek(); drop(value.offset(Self::deposit_creating(who, v))); } diff --git a/substrate/frame/support/src/traits/tokens/currency/lockable.rs b/substrate/frame/support/src/traits/tokens/currency/lockable.rs index ed3d1cf46362ba8b8264480e681b01b678e3dc37..94bce216dcbcb7f9535e4b510686d981cd4a422e 100644 --- a/substrate/frame/support/src/traits/tokens/currency/lockable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/lockable.rs @@ -17,10 +17,8 @@ //! The lockable currency trait and some associated types. -use crate::dispatch::DispatchResult; -use crate::traits::misc::Get; -use super::Currency; -use super::super::misc::WithdrawReasons; +use super::{super::misc::WithdrawReasons, Currency}; +use crate::{dispatch::DispatchResult, traits::misc::Get}; /// An identifier for a lock. Used for disambiguating different locks so that /// they can be individually replaced or removed. @@ -63,10 +61,7 @@ pub trait LockableCurrency: Currency { ); /// Remove an existing lock. - fn remove_lock( - id: LockIdentifier, - who: &AccountId, - ); + fn remove_lock(id: LockIdentifier, who: &AccountId); } /// A vesting schedule over a currency. This allows a particular currency to have vesting limits @@ -80,7 +75,8 @@ pub trait VestingSchedule { /// Get the amount that is currently being vested and cannot be transferred out of this account. /// Returns `None` if the account has no vesting schedule. - fn vesting_balance(who: &AccountId) -> Option<>::Balance>; + fn vesting_balance(who: &AccountId) + -> Option<>::Balance>; /// Adds a vesting schedule to a given account. /// diff --git a/substrate/frame/support/src/traits/tokens/currency/reservable.rs b/substrate/frame/support/src/traits/tokens/currency/reservable.rs index 69017357cfa848af2021ce8ee7567d21a88936a8..41220ca81cacd99467b0faf844d64aa82106743d 100644 --- a/substrate/frame/support/src/traits/tokens/currency/reservable.rs +++ b/substrate/frame/support/src/traits/tokens/currency/reservable.rs @@ -17,9 +17,8 @@ //! The reservable currency trait. -use super::Currency; -use super::super::misc::BalanceStatus; -use crate::dispatch::{DispatchResult, DispatchError}; +use super::{super::misc::BalanceStatus, Currency}; +use crate::dispatch::{DispatchError, DispatchResult}; /// A currency where funds can be reserved from the user. pub trait ReservableCurrency: Currency { @@ -33,7 +32,7 @@ pub trait ReservableCurrency: Currency { /// is less than `value`, then a non-zero second item will be returned. fn slash_reserved( who: &AccountId, - value: Self::Balance + value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance); /// The amount of the balance of a given account that is externally reserved; this can still get @@ -94,7 +93,7 @@ pub trait NamedReservableCurrency: ReservableCurrency { fn slash_reserved_named( id: &Self::ReserveIdentifier, who: &AccountId, - value: Self::Balance + value: Self::Balance, ) -> (Self::NegativeImbalance, Self::Balance); /// The amount of the balance of a given account that is externally reserved; this can still get @@ -114,7 +113,11 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will /// be returned to notify of this. This is different behavior than `unreserve`. 
- fn reserve_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> DispatchResult; + fn reserve_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance, + ) -> DispatchResult; /// Moves up to `value` from reserved balance to free balance. This function cannot fail. /// @@ -126,7 +129,11 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// - This is different from `reserve`. /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will /// invoke `on_reserved_too_low` and could reap the account. - fn unreserve_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> Self::Balance; + fn unreserve_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance, + ) -> Self::Balance; /// Moves up to `value` from reserved balance of account `slashed` to balance of account /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be @@ -147,16 +154,21 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// /// This will reserve an extra amount if the current reserved balance is less than `value`, /// and unreserve if the current reserved balance is greater than `value`. - fn ensure_reserved_named(id: &Self::ReserveIdentifier, who: &AccountId, value: Self::Balance) -> DispatchResult { + fn ensure_reserved_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + value: Self::Balance, + ) -> DispatchResult { let current = Self::reserved_balance_named(id, who); - if current > value { + if current > value { // we always have enough balance to unreserve here Self::unreserve_named(id, who, current - value); Ok(()) } else if value > current { // we checked value > current Self::reserve_named(id, who, value - current) - } else { // current == value + } else { + // current == value Ok(()) } } @@ -173,7 +185,10 @@ pub trait NamedReservableCurrency: ReservableCurrency { /// Slash all the reserved balance, returning the negative imbalance created. /// /// Is a no-op if the value to be slashed is zero. - fn slash_all_reserved_named(id: &Self::ReserveIdentifier, who: &AccountId) -> Self::NegativeImbalance { + fn slash_all_reserved_named( + id: &Self::ReserveIdentifier, + who: &AccountId, + ) -> Self::NegativeImbalance { let value = Self::reserved_balance_named(id, who); Self::slash_reserved_named(id, who, value).0 } diff --git a/substrate/frame/support/src/traits/tokens/fungible.rs b/substrate/frame/support/src/traits/tokens/fungible.rs index 5472212aaa65ec597a44fa943afa4e4fa94c1ece..b033236d447bb2c35a7ef8d65343575711ce99fc 100644 --- a/substrate/frame/support/src/traits/tokens/fungible.rs +++ b/substrate/frame/support/src/traits/tokens/fungible.rs @@ -17,16 +17,20 @@ //! The traits for dealing with a single fungible token class and any associated types. -use super::*; +use super::{ + misc::{Balance, DepositConsequence, WithdrawConsequence}, + *, +}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::Get, +}; use sp_runtime::traits::Saturating; -use crate::traits::misc::Get; -use crate::dispatch::{DispatchResult, DispatchError}; mod balanced; mod imbalance; pub use balanced::{Balanced, Unbalanced}; -pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; +pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; /// Trait for providing balance-inspection access to a fungible asset.
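The `ensure_reserved_named` default method reformatted above adjusts a named reserve toward a target. Its branch structure, mirrored over plain integers (`Reserve` can fail in the real trait when free balance is insufficient):

enum Adjustment {
    Unreserve(u64),
    Reserve(u64),
    Noop,
}

fn ensure_reserved(current: u64, value: u64) -> Adjustment {
    if current > value {
        Adjustment::Unreserve(current - value) // surplus: always safe to unreserve
    } else if value > current {
        Adjustment::Reserve(value - current) // shortfall: reserve the difference
    } else {
        Adjustment::Noop // current == value
    }
}

fn main() {
    assert!(matches!(ensure_reserved(10, 4), Adjustment::Unreserve(6)));
    assert!(matches!(ensure_reserved(4, 10), Adjustment::Reserve(6)));
    assert!(matches!(ensure_reserved(7, 7), Adjustment::Noop));
}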
pub trait Inspect { @@ -84,7 +88,10 @@ pub trait Mutate: Inspect { let extra = Self::can_withdraw(&source, amount).into_result()?; Self::can_deposit(&dest, amount.saturating_add(extra)).into_result()?; let actual = Self::burn_from(source, amount)?; - debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + debug_assert!( + actual == amount.saturating_add(extra), + "can_withdraw must agree with withdraw; qed" + ); match Self::mint_into(dest, actual) { Ok(_) => Ok(actual), Err(err) => { @@ -93,7 +100,7 @@ pub trait Mutate: Inspect { let revert = Self::mint_into(source, actual); debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); Err(err) - } + }, } } } @@ -129,8 +136,11 @@ pub trait MutateHold: InspectHold + Transfer { /// /// If `best_effort` is `true`, then the amount actually unreserved and returned as the inner /// value of `Ok` may be smaller than the `amount` passed. - fn release(who: &AccountId, amount: Self::Balance, best_effort: bool) - -> Result; + fn release( + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result; /// Transfer held funds into a destination account. /// @@ -160,17 +170,17 @@ pub trait BalancedHold: Balanced + MutateHold { /// /// As much funds that are on hold up to `amount` will be deducted as possible. If this is less /// than `amount`, then a non-zero second item will be returned. - fn slash_held(who: &AccountId, amount: Self::Balance) - -> (CreditOf, Self::Balance); + fn slash_held( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); } -impl< - AccountId, - T: Balanced + MutateHold, -> BalancedHold for T { - fn slash_held(who: &AccountId, amount: Self::Balance) - -> (CreditOf, Self::Balance) - { +impl + MutateHold> BalancedHold for T { + fn slash_held( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance) { let actual = match Self::release(who, amount, true) { Ok(x) => x, Err(_) => return (Imbalance::default(), amount), @@ -185,15 +195,14 @@ pub struct ItemOf< F: fungibles::Inspect, A: Get<>::AssetId>, AccountId, ->( - sp_std::marker::PhantomData<(F, A, AccountId)> -); +>(sp_std::marker::PhantomData<(F, A, AccountId)>); impl< - F: fungibles::Inspect, - A: Get<>::AssetId>, - AccountId, -> Inspect for ItemOf { + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, + > Inspect for ItemOf +{ type Balance = >::Balance; fn total_issuance() -> Self::Balance { >::total_issuance(A::get()) @@ -216,10 +225,11 @@ impl< } impl< - F: fungibles::Mutate, - A: Get<>::AssetId>, - AccountId, -> Mutate for ItemOf { + F: fungibles::Mutate, + A: Get<>::AssetId>, + AccountId, + > Mutate for ItemOf +{ fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult { >::mint_into(A::get(), who, amount) } @@ -229,22 +239,27 @@ impl< } impl< - F: fungibles::Transfer, - A: Get<>::AssetId>, - AccountId, -> Transfer for ItemOf { - fn transfer(source: &AccountId, dest: &AccountId, amount: Self::Balance, keep_alive: bool) - -> Result - { + F: fungibles::Transfer, + A: Get<>::AssetId>, + AccountId, + > Transfer for ItemOf +{ + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result { >::transfer(A::get(), source, dest, amount, keep_alive) } } impl< - F: fungibles::InspectHold, - A: Get<>::AssetId>, - AccountId, -> InspectHold for ItemOf { + F: fungibles::InspectHold, + A: Get<>::AssetId>, + AccountId, + > InspectHold for ItemOf +{ fn balance_on_hold(who: &AccountId) -> Self::Balance { 
>::balance_on_hold(A::get(), who) } @@ -254,16 +269,19 @@ impl< } impl< - F: fungibles::MutateHold, - A: Get<>::AssetId>, - AccountId, -> MutateHold for ItemOf { + F: fungibles::MutateHold, + A: Get<>::AssetId>, + AccountId, + > MutateHold for ItemOf +{ fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult { >::hold(A::get(), who, amount) } - fn release(who: &AccountId, amount: Self::Balance, best_effort: bool) - -> Result - { + fn release( + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result { >::release(A::get(), who, amount, best_effort) } fn transfer_held( @@ -285,23 +303,30 @@ impl< } impl< - F: fungibles::Unbalanced, - A: Get<>::AssetId>, - AccountId, -> Unbalanced for ItemOf { + F: fungibles::Unbalanced, + A: Get<>::AssetId>, + AccountId, + > Unbalanced for ItemOf +{ fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult { >::set_balance(A::get(), who, amount) } fn set_total_issuance(amount: Self::Balance) -> () { >::set_total_issuance(A::get(), amount) } - fn decrease_balance(who: &AccountId, amount: Self::Balance) -> Result { + fn decrease_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { >::decrease_balance(A::get(), who, amount) } fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { >::decrease_balance_at_most(A::get(), who, amount) } - fn increase_balance(who: &AccountId, amount: Self::Balance) -> Result { + fn increase_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { >::increase_balance(A::get(), who, amount) } fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { diff --git a/substrate/frame/support/src/traits/tokens/fungible/balanced.rs b/substrate/frame/support/src/traits/tokens/fungible/balanced.rs index 1cd0fcf0ca414ea8f4461d118866d77147a5fddb..a54b29a9d91344bacc990571a722d25948dfb7f5 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/balanced.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/balanced.rs @@ -18,12 +18,16 @@ //! The trait and associated types for sets of fungible tokens that manage total issuance without //! requiring atomic balanced operations. -use super::*; +use super::{super::Imbalance as ImbalanceT, *}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::{SameOrOther, TryDrop}, +}; +use sp_runtime::{ + traits::{CheckedAdd, Zero}, + ArithmeticError, TokenError, +}; use sp_std::marker::PhantomData; -use sp_runtime::{TokenError, ArithmeticError, traits::{CheckedAdd, Zero}}; -use super::super::Imbalance as ImbalanceT; -use crate::traits::misc::{SameOrOther, TryDrop}; -use crate::dispatch::{DispatchResult, DispatchError}; /// A fungible token class where any creation and deletion of tokens is semi-explicit and where the /// total supply is maintained automatically. @@ -65,10 +69,7 @@ pub trait Balanced: Inspect { /// /// As much funds up to `value` will be deducted as possible. If this is less than `value`, /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - amount: Self::Balance, - ) -> (CreditOf, Self::Balance); + fn slash(who: &AccountId, amount: Self::Balance) -> (CreditOf, Self::Balance); /// Mints exactly `value` into the account of `who`. 
/// @@ -90,7 +91,7 @@ pub trait Balanced: Inspect { fn withdraw( who: &AccountId, value: Self::Balance, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DispatchError>; /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` @@ -119,7 +120,7 @@ pub trait Balanced: Inspect { fn settle( who: &AccountId, debt: DebtOf, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DebtOf> { let amount = debt.peek(); let credit = match Self::withdraw(who, amount) { @@ -132,7 +133,7 @@ pub trait Balanced: Inspect { SameOrOther::Other(rest) => { debug_assert!(false, "ok withdraw return must be at least debt value; qed"); Err(rest) - } + }, } } } @@ -158,9 +159,10 @@ pub trait Unbalanced: Inspect { /// /// Minimum balance will be respected and the returned imbalance may be up to /// `Self::minimum_balance() - 1` greater than `amount`. - fn decrease_balance(who: &AccountId, amount: Self::Balance) - -> Result - { + fn decrease_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { let old_balance = Self::balance(who); let (mut new_balance, mut amount) = if old_balance < amount { Err(TokenError::NoFunds)? @@ -182,9 +184,7 @@ pub trait Unbalanced: Inspect { /// `Self::minimum_balance() - 1` greater than `amount`. /// /// Return the imbalance by which the account was reduced. - fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) - -> Self::Balance - { + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { let old_balance = Self::balance(who); let (mut new_balance, mut amount) = if old_balance < amount { (Zero::zero(), old_balance) @@ -217,9 +217,10 @@ pub trait Unbalanced: Inspect { /// /// Minimum balance will be respected and an error will be returned if /// `amount < Self::minimum_balance()` when the account of `who` is zero. - fn increase_balance(who: &AccountId, amount: Self::Balance) - -> Result - { + fn increase_balance( + who: &AccountId, + amount: Self::Balance, + ) -> Result { let old_balance = Self::balance(who); let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance() { @@ -237,9 +238,7 @@ pub trait Unbalanced: Inspect { /// `amount < Self::minimum_balance()`. /// /// Return the imbalance by which the account was increased. - fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) - -> Self::Balance - { + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { let old_balance = Self::balance(who); let mut new_balance = old_balance.saturating_add(amount); let mut amount = new_balance - old_balance; @@ -300,16 +299,12 @@ type Debt = Imbalance< >; /// Create some `Credit` item. Only for internal use. -fn credit>( - amount: U::Balance, -) -> Credit { +fn credit>(amount: U::Balance) -> Credit { Imbalance::new(amount) } /// Create some `Debt` item. Only for internal use. -fn debt>( - amount: U::Balance, -) -> Debt { +fn debt>(amount: U::Balance) -> Debt { Imbalance::new(amount) } @@ -328,10 +323,7 @@ impl> Balanced for U { U::set_total_issuance(new); credit(new - old) } - fn slash( - who: &AccountId, - amount: Self::Balance, - ) -> (Credit, Self::Balance) { + fn slash(who: &AccountId, amount: Self::Balance) -> (Credit, Self::Balance) { let slashed = U::decrease_balance_at_most(who, amount); // `slashed` could be less than, greater than or equal to `amount`. 
// If slashed == amount, it means the account had at least amount in it and it could all be @@ -344,7 +336,7 @@ impl> Balanced for U { } fn deposit( who: &AccountId, - amount: Self::Balance + amount: Self::Balance, ) -> Result, DispatchError> { let increase = U::increase_balance(who, amount)?; Ok(debt(increase)) @@ -352,7 +344,7 @@ impl> Balanced for U { fn withdraw( who: &AccountId, amount: Self::Balance, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DispatchError> { let decrease = U::decrease_balance(who, amount)?; Ok(credit(decrease)) diff --git a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs index ab3694359ce9c2e96ea84fb6156e1eaccf1c22d8..e6d3b5bed66aaa63683b5fe46ca08c1d77f69ebd 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -18,13 +18,10 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. -use super::*; +use super::{super::Imbalance as ImbalanceT, balanced::Balanced, misc::Balance, *}; +use crate::traits::misc::{SameOrOther, TryDrop}; +use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::marker::PhantomData; -use sp_runtime::{RuntimeDebug, traits::Zero}; -use super::misc::Balance; -use super::balanced::Balanced; -use crate::traits::misc::{TryDrop, SameOrOther}; -use super::super::Imbalance as ImbalanceT; /// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or /// debt (positive) imbalance. @@ -49,11 +46,9 @@ pub struct Imbalance< _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, } -impl< - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop -> Drop for Imbalance { +impl, OppositeOnDrop: HandleImbalanceDrop> Drop + for Imbalance +{ fn drop(&mut self) { if !self.amount.is_zero() { OnDrop::handle(self.amount) @@ -61,42 +56,34 @@ impl< } } -impl< - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> TryDrop for Imbalance { +impl, OppositeOnDrop: HandleImbalanceDrop> TryDrop + for Imbalance +{ /// Drop an instance cleanly. Only works if its value represents "no-operation". 
fn try_drop(self) -> Result<(), Self> { self.drop_zero() } } -impl< - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> Default for Imbalance { +impl, OppositeOnDrop: HandleImbalanceDrop> Default + for Imbalance +{ fn default() -> Self { Self::zero() } } -impl< - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> Imbalance { +impl, OppositeOnDrop: HandleImbalanceDrop> + Imbalance +{ pub(crate) fn new(amount: B) -> Self { Self { amount, _phantom: PhantomData } } } -impl< - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> ImbalanceT for Imbalance { +impl, OppositeOnDrop: HandleImbalanceDrop> + ImbalanceT for Imbalance +{ type Opposite = Imbalance; fn zero() -> Self { @@ -127,9 +114,10 @@ impl< self.amount = self.amount.saturating_add(other.amount); sp_std::mem::forget(other); } - fn offset(self, other: Imbalance) - -> SameOrOther> - { + fn offset( + self, + other: Imbalance, + ) -> SameOrOther> { let (a, b) = (self.amount, other.amount); sp_std::mem::forget((self, other)); diff --git a/substrate/frame/support/src/traits/tokens/fungibles.rs b/substrate/frame/support/src/traits/tokens/fungibles.rs index 490f28dfb453a0c75213536949177bf2c7e8b65e..3f5a1c75860c285b6f4af411976fb5a5687f6251 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles.rs @@ -17,15 +17,17 @@ //! The traits for sets of fungible tokens and any associated types. -use super::*; +use super::{ + misc::{AssetId, Balance}, + *, +}; use crate::dispatch::{DispatchError, DispatchResult}; -use super::misc::{AssetId, Balance}; use sp_runtime::traits::Saturating; mod balanced; pub use balanced::{Balanced, Unbalanced}; mod imbalance; -pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; +pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; /// Trait for providing balance-inspection access to a set of named fungible assets. pub trait Inspect { @@ -48,8 +50,11 @@ pub trait Inspect { fn reducible_balance(asset: Self::AssetId, who: &AccountId, keep_alive: bool) -> Self::Balance; /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. - fn can_deposit(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> DepositConsequence; + fn can_deposit( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> DepositConsequence; /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise /// the consequence. @@ -87,8 +92,11 @@ pub trait Mutate: Inspect { /// Due to minimum balance requirements, it's possible that the amount withdrawn could be up to /// `Self::minimum_balance() - 1` more than the `amount`. The total amount withdrawn is returned /// in an `Ok` result. This may be safely ignored if you don't mind the overall supply reducing. - fn burn_from(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Result; + fn burn_from( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result; /// Attempt to reduce the `asset` balance of `who` by as much as possible up to `amount`, and /// possibly slightly more due to minimum_balance requirements. If no decrease is possible then @@ -97,9 +105,11 @@ pub trait Mutate: Inspect { /// /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure /// that is doesn't fail. 
- fn slash(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Result - { + fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { Self::burn_from(asset, who, Self::reducible_balance(asset, who, false).min(amount)) } @@ -114,7 +124,10 @@ pub trait Mutate: Inspect { let extra = Self::can_withdraw(asset, &source, amount).into_result()?; Self::can_deposit(asset, &dest, amount.saturating_add(extra)).into_result()?; let actual = Self::burn_from(asset, source, amount)?; - debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + debug_assert!( + actual == amount.saturating_add(extra), + "can_withdraw must agree with withdraw; qed" + ); match Self::mint_into(asset, dest, actual) { Ok(_) => Ok(actual), Err(err) => { @@ -123,7 +136,7 @@ pub trait Mutate: Inspect { let revert = Self::mint_into(asset, source, actual); debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); Err(err) - } + }, } } } @@ -158,8 +171,12 @@ pub trait MutateHold: InspectHold + Transfer { /// /// If `best_effort` is `true`, then the amount actually released and returned as the inner /// value of `Ok` may be smaller than the `amount` passed. - fn release(asset: Self::AssetId, who: &AccountId, amount: Self::Balance, best_effort: bool) - -> Result; + fn release( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + best_effort: bool, + ) -> Result; /// Transfer held funds into a destination account. /// @@ -190,17 +207,19 @@ pub trait BalancedHold: Balanced + MutateHold { /// /// As much funds up to `amount` will be deducted as possible. If this is less than `amount`, /// then a non-zero second item will be returned. - fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> (CreditOf, Self::Balance); + fn slash_held( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); } -impl< - AccountId, - T: Balanced + MutateHold, -> BalancedHold for T { - fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> (CreditOf, Self::Balance) - { +impl + MutateHold> BalancedHold for T { + fn slash_held( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance) { let actual = match Self::release(asset, who, amount, true) { Ok(x) => x, Err(_) => return (Imbalance::zero(asset), amount), diff --git a/substrate/frame/support/src/traits/tokens/fungibles/balanced.rs b/substrate/frame/support/src/traits/tokens/fungibles/balanced.rs index a1016f8c119557c4acdf6eb32ded946bab09cdc9..9c601c3e7c42dfc4d3e2942236731c09559c3f1f 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/balanced.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -19,11 +19,16 @@ //! requiring atomic balanced operations. use super::*; -use sp_std::marker::PhantomData; -use sp_runtime::{ArithmeticError, TokenError, traits::{Zero, CheckedAdd}}; +use crate::{ + dispatch::{DispatchError, DispatchResult}, + traits::misc::{SameOrOther, TryDrop}, +}; use sp_arithmetic::traits::Saturating; -use crate::dispatch::{DispatchError, DispatchResult}; -use crate::traits::misc::{SameOrOther, TryDrop}; +use sp_runtime::{ + traits::{CheckedAdd, Zero}, + ArithmeticError, TokenError, +}; +use sp_std::marker::PhantomData; /// A fungible token class where any creation and deletion of tokens is semi-explicit and where the /// total supply is maintained automatically. 
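The `teleport` defaults in both the `fungible` and `fungibles` diffs above share a check-then-commit-with-revert shape: pre-validate with `can_withdraw`/`can_deposit`, burn from the source, and if the mint into the destination still fails, mint the funds back so the caller never observes a half-applied transfer. A self-contained sketch of the burn/mint/revert core (the pre-checks are elided); `Ledger` and its error strings are hypothetical stand-ins for the traits:

use std::collections::HashMap;

struct Ledger {
    balances: HashMap<&'static str, u64>,
    min_balance: u64,
}

impl Ledger {
    fn burn_from(&mut self, who: &'static str, amount: u64) -> Result<u64, &'static str> {
        let b = self.balances.get_mut(who).ok_or("unknown account")?;
        if *b < amount {
            return Err("no funds")
        }
        *b -= amount;
        Ok(amount)
    }

    fn mint_into(&mut self, who: &'static str, amount: u64) -> Result<(), &'static str> {
        let min = self.min_balance;
        let b = self.balances.entry(who).or_insert(0);
        if *b + amount < min {
            return Err("below minimum balance")
        }
        *b += amount;
        Ok(())
    }

    // Burn from `source` first; if the deposit then fails, mint the funds back so
    // the operation is all-or-nothing from the caller's point of view.
    fn teleport(
        &mut self,
        source: &'static str,
        dest: &'static str,
        amount: u64,
    ) -> Result<u64, &'static str> {
        let actual = self.burn_from(source, amount)?;
        match self.mint_into(dest, actual) {
            Ok(()) => Ok(actual),
            Err(err) => {
                let revert = self.mint_into(source, actual);
                debug_assert!(revert.is_ok(), "withdrew funds previously; qed");
                Err(err)
            },
        }
    }
}

fn main() {
    let mut l = Ledger {
        balances: HashMap::from([("alice", 100), ("bob", 10)]),
        min_balance: 5,
    };
    assert_eq!(l.teleport("alice", "bob", 40), Ok(40));
    assert_eq!(l.balances["bob"], 50);
    // A deposit that would leave `carol` below the minimum fails and is reverted.
    assert_eq!(l.teleport("alice", "carol", 3), Err("below minimum balance"));
    assert_eq!(l.balances["alice"], 60);
}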
@@ -55,9 +60,10 @@ pub trait Balanced: Inspect { /// /// This is just the same as burning and issuing the same amount and has no effect on the /// total issuance. - fn pair(asset: Self::AssetId, amount: Self::Balance) - -> (DebtOf, CreditOf) - { + fn pair( + asset: Self::AssetId, + amount: Self::Balance, + ) -> (DebtOf, CreditOf) { (Self::rescind(asset, amount), Self::issue(asset, amount)) } @@ -96,7 +102,7 @@ pub trait Balanced: Inspect { asset: Self::AssetId, who: &AccountId, value: Self::Balance, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DispatchError>; /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` @@ -129,7 +135,7 @@ pub trait Balanced: Inspect { fn settle( who: &AccountId, debt: DebtOf, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DebtOf> { let amount = debt.peek(); let asset = debt.asset(); @@ -143,11 +149,11 @@ pub trait Balanced: Inspect { Ok(SameOrOther::Other(rest)) => { debug_assert!(false, "ok withdraw return must be at least debt value; qed"); Err(rest) - } + }, Err(_) => { debug_assert!(false, "debt.asset is credit.asset; qed"); Ok(CreditOf::::zero(asset)) - } + }, } } } @@ -173,9 +179,11 @@ pub trait Unbalanced: Inspect { /// /// Minimum balance will be respected and the returned imbalance may be up to /// `Self::minimum_balance() - 1` greater than `amount`. - fn decrease_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Result - { + fn decrease_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { let old_balance = Self::balance(asset, who); let (mut new_balance, mut amount) = if old_balance < amount { Err(TokenError::NoFunds)? @@ -197,9 +205,11 @@ pub trait Unbalanced: Inspect { /// `Self::minimum_balance() - 1` greater than `amount`. /// /// Return the imbalance by which the account was reduced. - fn decrease_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Self::Balance - { + fn decrease_balance_at_most( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Self::Balance { let old_balance = Self::balance(asset, who); let (mut new_balance, mut amount) = if old_balance < amount { (Zero::zero(), old_balance) @@ -232,9 +242,11 @@ pub trait Unbalanced: Inspect { /// /// Minimum balance will be respected and an error will be returned if /// `amount < Self::minimum_balance()` when the account of `who` is zero. - fn increase_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Result - { + fn increase_balance( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Result { let old_balance = Self::balance(asset, who); let new_balance = old_balance.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; if new_balance < Self::minimum_balance(asset) { @@ -252,9 +264,11 @@ pub trait Unbalanced: Inspect { /// `amount < Self::minimum_balance()`. /// /// Return the imbalance by which the account was increased. 
- fn increase_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) - -> Self::Balance - { + fn increase_balance_at_most( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> Self::Balance { let old_balance = Self::balance(asset, who); let mut new_balance = old_balance.saturating_add(amount); let mut amount = new_balance - old_balance; @@ -361,7 +375,7 @@ impl> Balanced for U { fn deposit( asset: Self::AssetId, who: &AccountId, - amount: Self::Balance + amount: Self::Balance, ) -> Result, DispatchError> { let increase = U::increase_balance(asset, who, amount)?; Ok(debt(asset, increase)) @@ -370,7 +384,7 @@ impl> Balanced for U { asset: Self::AssetId, who: &AccountId, amount: Self::Balance, - //TODO: liveness: ExistenceRequirement, + // TODO: liveness: ExistenceRequirement, ) -> Result, DispatchError> { let decrease = U::decrease_balance(asset, who, amount)?; Ok(credit(asset, decrease)) diff --git a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs index 9ecdeac1d4f0642f79dfe4098d750bf7ccc3ac9e..2195cacc428225371be436368a6f0765ee77f1c2 100644 --- a/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/fungibles/imbalance.rs @@ -18,12 +18,14 @@ //! The imbalance type and its associates, which handles keeps everything adding up properly with //! unbalanced operations. -use super::*; +use super::{ + balanced::Balanced, + fungibles::{AssetId, Balance}, + *, +}; +use crate::traits::misc::{SameOrOther, TryDrop}; +use sp_runtime::{traits::Zero, RuntimeDebug}; use sp_std::marker::PhantomData; -use sp_runtime::{RuntimeDebug, traits::Zero}; -use super::fungibles::{AssetId, Balance}; -use super::balanced::Balanced; -use crate::traits::misc::{TryDrop, SameOrOther}; /// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or /// debt (positive) imbalance. @@ -50,11 +52,12 @@ pub struct Imbalance< } impl< - A: AssetId, - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop -> Drop for Imbalance { + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > Drop for Imbalance +{ fn drop(&mut self) { if !self.amount.is_zero() { OnDrop::handle(self.asset, self.amount) @@ -63,11 +66,12 @@ impl< } impl< - A: AssetId, - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> TryDrop for Imbalance { + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > TryDrop for Imbalance +{ /// Drop an instance cleanly. Only works if its value represents "no-operation". 
fn try_drop(self) -> Result<(), Self> { self.drop_zero() @@ -75,11 +79,12 @@ impl< } impl< - A: AssetId, - B: Balance, - OnDrop: HandleImbalanceDrop, - OppositeOnDrop: HandleImbalanceDrop, -> Imbalance { + A: AssetId, + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, + > Imbalance +{ pub fn zero(asset: A) -> Self { Self { asset, amount: Zero::zero(), _phantom: PhantomData } } @@ -122,7 +127,10 @@ impl< Err(other) } } - pub fn offset(self, other: Imbalance) -> Result< + pub fn offset( + self, + other: Imbalance, + ) -> Result< SameOrOther>, (Self, Imbalance), > { diff --git a/substrate/frame/support/src/traits/tokens/imbalance.rs b/substrate/frame/support/src/traits/tokens/imbalance.rs index 9652b9a0275a1ccd8c2d2bac74352784dd67ce42..0f7b38a65efc805c8ce101d1d0f0426cd41955b6 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance.rs @@ -18,16 +18,16 @@ //! The imbalance trait type and its associates, which handles keeps everything adding up properly //! with unbalanced operations. -use sp_std::ops::Div; +use crate::traits::misc::{SameOrOther, TryDrop}; use sp_runtime::traits::Saturating; -use crate::traits::misc::{TryDrop, SameOrOther}; +use sp_std::ops::Div; -mod split_two_ways; -mod signed_imbalance; mod on_unbalanced; -pub use split_two_ways::SplitTwoWays; -pub use signed_imbalance::SignedImbalance; +mod signed_imbalance; +mod split_two_ways; pub use on_unbalanced::OnUnbalanced; +pub use signed_imbalance::SignedImbalance; +pub use split_two_ways::SplitTwoWays; /// A trait for a not-quite Linear Type that tracks an imbalance. /// @@ -78,10 +78,13 @@ pub trait Imbalance: Sized + TryDrop + Default { /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should /// fit into a `u32`. Overflow will safely saturate in both cases. fn ration(self, first: u32, second: u32) -> (Self, Self) - where Balance: From + Saturating + Div + where + Balance: From + Saturating + Div, { let total: u32 = first.saturating_add(second); - if total == 0 { return (Self::zero(), Self::zero()) } + if total == 0 { + return (Self::zero(), Self::zero()) + } let amount1 = self.peek().saturating_mul(first.into()) / total.into(); self.split(amount1) } @@ -100,7 +103,8 @@ pub trait Imbalance: Sized + TryDrop + Default { /// /// A convenient replacement for `split` and `merge`. fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) - where Balance: From + Saturating + Div + where + Balance: From + Saturating + Div, { let (a, b) = self.ration(first, second); (a.merge(others.0), b.merge(others.1)) @@ -121,7 +125,8 @@ pub trait Imbalance: Sized + TryDrop + Default { /// /// A convenient replacement for `split` and `merge`. fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) - where Balance: From + Saturating + Div + where + Balance: From + Saturating + Div, { let (a, b) = self.ration(first, second); others.0.subsume(a); @@ -167,7 +172,7 @@ pub trait Imbalance: Sized + TryDrop + Default { /// greater value than the `other`. Otherwise returns `Err` with an instance of /// the `Opposite`. In both cases the value represents the combination of `self` /// and `other`. - fn offset(self, other: Self::Opposite)-> SameOrOther; + fn offset(self, other: Self::Opposite) -> SameOrOther; /// The raw value of self. 
fn peek(&self) -> Balance; diff --git a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs index f3ecc14308e74dd990b0a0e3ffae196ea0dd0a8a..bc7df0e2acf3300638dde71a4f64ae8765eb4b3d 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs @@ -33,7 +33,10 @@ pub trait OnUnbalanced { /// Handler for some imbalances. The different imbalances might have different origins or /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all /// of them. Infallible. - fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { + fn on_unbalanceds(amounts: impl Iterator) + where + Imbalance: crate::traits::Imbalance, + { Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) } @@ -44,7 +47,9 @@ pub trait OnUnbalanced { /// Actually handle a non-zero imbalance. You probably want to implement this rather than /// `on_unbalanced`. - fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } + fn on_nonzero_unbalanced(amount: Imbalance) { + drop(amount); + } } impl OnUnbalanced for () {} diff --git a/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs index e3523f86804fd1eaf0ea25969d8445f04077467a..59302b975854f748fb8a1564b1e85a780922823b 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs @@ -17,14 +17,14 @@ //! Convenience type for managing an imbalance whose sign is unknown. +use super::super::imbalance::Imbalance; +use crate::traits::misc::SameOrOther; use codec::FullCodec; -use sp_std::fmt::Debug; use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; -use crate::traits::misc::SameOrOther; -use super::super::imbalance::Imbalance; +use sp_std::fmt::Debug; /// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ +pub enum SignedImbalance> { /// A positive imbalance (funds have been created but none destroyed). Positive(PositiveImbalance), /// A negative imbalance (funds have been destroyed but none created). @@ -32,10 +32,11 @@ pub enum SignedImbalance>{ } impl< - P: Imbalance, - N: Imbalance, - B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, -> SignedImbalance { + P: Imbalance, + N: Imbalance, + B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, + > SignedImbalance +{ /// Create a `Positive` instance of `Self` whose value is zero. pub fn zero() -> Self { SignedImbalance::Positive(P::zero()) diff --git a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs index f3f9870b62cd20381db5f6fb8ed10fd906139689..882b43c2e914cdc60104c078bbf4d38960bc198f 100644 --- a/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs +++ b/substrate/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -17,29 +17,24 @@ //! Means for splitting an imbalance into two and hanlding them differently. 
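The `ration` default in the `Imbalance` trait diff above, which `SplitTwoWays` (whose diff begins here) builds on, divides an amount `first:second` by computing the first share as `amount * first / (first + second)` with saturating arithmetic; the remainder becomes the second share, so nothing is lost to rounding. A quick standalone sketch of that arithmetic over plain integers (illustrative only):

fn ration(amount: u64, first: u32, second: u32) -> (u64, u64) {
    let total = first.saturating_add(second);
    if total == 0 {
        // Mirrors the zero-total guard in the trait default.
        return (0, 0)
    }
    let first_part = amount.saturating_mul(first as u64) / total as u64;
    // The remainder goes to the second part, so rounding loses nothing overall.
    (first_part, amount - first_part)
}

fn main() {
    assert_eq!(ration(100, 3, 1), (75, 25));
    // Integer division rounds the first share down; the second absorbs the rest.
    assert_eq!(ration(10, 1, 3), (2, 8));
    assert_eq!(ration(10, 0, 0), (0, 0));
}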
-use sp_std::{ops::Div, marker::PhantomData}; +use super::super::imbalance::{Imbalance, OnUnbalanced}; use sp_core::u32_trait::Value as U32; use sp_runtime::traits::Saturating; -use super::super::imbalance::{Imbalance, OnUnbalanced}; +use sp_std::{marker::PhantomData, ops::Div}; /// Split an unbalanced amount two ways between a common divisor. -pub struct SplitTwoWays< - Balance, - Imbalance, - Part1, - Target1, - Part2, - Target2, ->(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); +pub struct SplitTwoWays( + PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>, +); impl< - Balance: From + Saturating + Div, - I: Imbalance, - Part1: U32, - Target1: OnUnbalanced, - Part2: U32, - Target2: OnUnbalanced, -> OnUnbalanced for SplitTwoWays + Balance: From + Saturating + Div, + I: Imbalance, + Part1: U32, + Target1: OnUnbalanced, + Part2: U32, + Target2: OnUnbalanced, + > OnUnbalanced for SplitTwoWays { fn on_nonzero_unbalanced(amount: I) { let total: u32 = Part1::VALUE + Part2::VALUE; diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index 97c111798caaa5634ef92af54e53e3d651db0c72..8eda930380d833e5d3adc385564d677921c52dec 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -17,11 +17,11 @@ //! Miscellaneous types. -use sp_std::fmt::Debug; -use codec::{Encode, Decode, FullCodec}; +use codec::{Decode, Encode, FullCodec}; +use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; use sp_core::RuntimeDebug; -use sp_arithmetic::traits::{Zero, AtLeast32BitUnsigned}; -use sp_runtime::{DispatchError, ArithmeticError, TokenError}; +use sp_runtime::{ArithmeticError, DispatchError, TokenError}; +use sp_std::fmt::Debug; /// One of a number of consequences of withdrawing a fungible from an account. #[derive(Copy, Clone, Eq, PartialEq)] @@ -150,7 +150,7 @@ impl WithdrawReasons { /// assert_eq!( /// WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP, /// WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), - /// ); + /// ); /// # } /// ``` pub fn except(one: WithdrawReasons) -> WithdrawReasons { @@ -161,7 +161,7 @@ impl WithdrawReasons { } /// Simple amalgamation trait to collect together properties for an AssetId under one roof. -pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {} +pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {} impl AssetId for T {} /// Simple amalgamation trait to collect together properties for a Balance under one roof. diff --git a/substrate/frame/support/src/traits/tokens/nonfungible.rs b/substrate/frame/support/src/traits/tokens/nonfungible.rs index 27e6cf8126a8e8faca369f0ffbee0496e9858b9d..821884f6e3905e8787fe90e4deeabfe23c376d77 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungible.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungible.rs @@ -24,12 +24,11 @@ //! For an NFT API which has dual-level namespacing, the traits in `nonfungibles` are better to //! use. -use codec::{Encode, Decode}; -use sp_std::prelude::*; -use sp_runtime::TokenError; -use crate::dispatch::DispatchResult; -use crate::traits::Get; use super::nonfungibles; +use crate::{dispatch::DispatchResult, traits::Get}; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; /// Trait for providing an interface to a read-only NFT-like set of asset instances. 
pub trait Inspect { @@ -43,7 +42,9 @@ pub trait Inspect { /// Returns the attribute value of `instance` corresponding to `key`. /// /// By default this is `None`; no attributes are defined. - fn attribute(_instance: &Self::InstanceId, _key: &[u8]) -> Option> { None } + fn attribute(_instance: &Self::InstanceId, _key: &[u8]) -> Option> { + None + } /// Returns the strongly-typed attribute value of `instance` corresponding to `key`. /// @@ -56,7 +57,9 @@ pub trait Inspect { /// Returns `true` if the asset `instance` may be transferred. /// /// Default implementation is that all assets are transferable. - fn can_transfer(_instance: &Self::InstanceId) -> bool { true } + fn can_transfer(_instance: &Self::InstanceId) -> bool { + true + } } /// Interface for enumerating assets in existence or owned by a given account over a collection @@ -117,15 +120,14 @@ pub struct ItemOf< F: nonfungibles::Inspect, A: Get<>::ClassId>, AccountId, ->( - sp_std::marker::PhantomData<(F, A, AccountId)> -); +>(sp_std::marker::PhantomData<(F, A, AccountId)>); impl< - F: nonfungibles::Inspect, - A: Get<>::ClassId>, - AccountId, -> Inspect for ItemOf { + F: nonfungibles::Inspect, + A: Get<>::ClassId>, + AccountId, + > Inspect for ItemOf +{ type InstanceId = >::InstanceId; fn owner(instance: &Self::InstanceId) -> Option { >::owner(&A::get(), instance) @@ -142,10 +144,11 @@ impl< } impl< - F: nonfungibles::InspectEnumerable, - A: Get<>::ClassId>, - AccountId, -> InspectEnumerable for ItemOf { + F: nonfungibles::InspectEnumerable, + A: Get<>::ClassId>, + AccountId, + > InspectEnumerable for ItemOf +{ fn instances() -> Box> { >::instances(&A::get()) } @@ -155,10 +158,11 @@ impl< } impl< - F: nonfungibles::Mutate, - A: Get<>::ClassId>, - AccountId, -> Mutate for ItemOf { + F: nonfungibles::Mutate, + A: Get<>::ClassId>, + AccountId, + > Mutate for ItemOf +{ fn mint_into(instance: &Self::InstanceId, who: &AccountId) -> DispatchResult { >::mint_into(&A::get(), instance, who) } @@ -178,10 +182,11 @@ impl< } impl< - F: nonfungibles::Transfer, - A: Get<>::ClassId>, - AccountId, -> Transfer for ItemOf { + F: nonfungibles::Transfer, + A: Get<>::ClassId>, + AccountId, + > Transfer for ItemOf +{ fn transfer(instance: &Self::InstanceId, destination: &AccountId) -> DispatchResult { >::transfer(&A::get(), instance, destination) } diff --git a/substrate/frame/support/src/traits/tokens/nonfungibles.rs b/substrate/frame/support/src/traits/tokens/nonfungibles.rs index b50c5f4d9814cfa9c17fa7bfc5a17e561601f581..64bbf3a8edf7a7aa14a7f8712973a11ddd0a2550 100644 --- a/substrate/frame/support/src/traits/tokens/nonfungibles.rs +++ b/substrate/frame/support/src/traits/tokens/nonfungibles.rs @@ -27,10 +27,10 @@ //! Implementations of these traits may be converted to implementations of corresponding //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::TokenError; use crate::dispatch::DispatchResult; +use codec::{Decode, Encode}; +use sp_runtime::TokenError; +use sp_std::prelude::*; /// Trait for providing an interface to many read-only NFT-like sets of asset instances. pub trait Inspect { @@ -48,14 +48,18 @@ pub trait Inspect { /// Returns the owner of the asset `class`, if there is one. For many NFTs this may not make /// any sense, so users of this API should not be surprised to find an asset class results in /// `None` here. 
- fn class_owner(_class: &Self::ClassId) -> Option { None } + fn class_owner(_class: &Self::ClassId) -> Option { + None + } /// Returns the attribute value of `instance` of `class` corresponding to `key`. /// /// By default this is `None`; no attributes are defined. - fn attribute(_class: &Self::ClassId, _instance: &Self::InstanceId, _key: &[u8]) - -> Option> - { + fn attribute( + _class: &Self::ClassId, + _instance: &Self::InstanceId, + _key: &[u8], + ) -> Option> { None } @@ -74,15 +78,14 @@ pub trait Inspect { /// Returns the attribute value of `class` corresponding to `key`. /// /// By default this is `None`; no attributes are defined. - fn class_attribute(_class: &Self::ClassId, _key: &[u8]) -> Option> { None } + fn class_attribute(_class: &Self::ClassId, _key: &[u8]) -> Option> { + None + } /// Returns the strongly-typed attribute value of `class` corresponding to `key`. /// /// By default this just attempts to use `class_attribute`. - fn typed_class_attribute( - class: &Self::ClassId, - key: &K, - ) -> Option { + fn typed_class_attribute(class: &Self::ClassId, key: &K) -> Option { key.using_encoded(|d| Self::class_attribute(class, d)) .and_then(|v| V::decode(&mut &v[..]).ok()) } @@ -90,7 +93,9 @@ pub trait Inspect { /// Returns `true` if the asset `instance` of `class` may be transferred. /// /// Default implementation is that all assets are transferable. - fn can_transfer(_class: &Self::ClassId, _instance: &Self::InstanceId) -> bool { true } + fn can_transfer(_class: &Self::ClassId, _instance: &Self::InstanceId) -> bool { + true + } } /// Interface for enumerating assets in existence or owned by a given account over many collections @@ -106,7 +111,10 @@ pub trait InspectEnumerable: Inspect { fn owned(who: &AccountId) -> Box>; /// Returns an iterator of the asset instances of `class` owned by `who`. - fn owned_in_class(class: &Self::ClassId, who: &AccountId) -> Box>; + fn owned_in_class( + class: &Self::ClassId, + who: &AccountId, + ) -> Box>; } /// Trait for providing an interface for multiple classes of NFT-like assets which may be minted, @@ -151,19 +159,13 @@ pub trait Mutate: Inspect { key: &K, value: &V, ) -> DispatchResult { - key.using_encoded(|k| value.using_encoded(|v| - Self::set_attribute(class, instance, k, v) - )) + key.using_encoded(|k| value.using_encoded(|v| Self::set_attribute(class, instance, k, v))) } /// Set attribute `value` of asset `class`'s `key`. /// /// By default, this is not a supported operation. - fn set_class_attribute( - _class: &Self::ClassId, - _key: &[u8], - _value: &[u8], - ) -> DispatchResult { + fn set_class_attribute(_class: &Self::ClassId, _key: &[u8], _value: &[u8]) -> DispatchResult { Err(TokenError::Unsupported.into()) } @@ -175,9 +177,7 @@ pub trait Mutate: Inspect { key: &K, value: &V, ) -> DispatchResult { - key.using_encoded(|k| value.using_encoded(|v| - Self::set_class_attribute(class, k, v) - )) + key.using_encoded(|k| value.using_encoded(|v| Self::set_class_attribute(class, k, v))) } } diff --git a/substrate/frame/support/src/traits/validation.rs b/substrate/frame/support/src/traits/validation.rs index d0583d6991fe6112d48ea68adb6cd910145650fe..5a68f289df48f7c05c2c0070b1be7fdebbf53bdb 100644 --- a/substrate/frame/support/src/traits/validation.rs +++ b/substrate/frame/support/src/traits/validation.rs @@ -17,13 +17,14 @@ //! Traits for dealing with validation and validators. 
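The typed attribute helpers in the `nonfungibles` diff above all share one convention: keys and values live in storage as raw SCALE bytes, and the strongly-typed variants simply encode on write and decode on read. A minimal sketch of that round-trip, assuming the same `parity-scale-codec` crate (`codec`) used throughout this diff; the `Attributes` store itself is a hypothetical stand-in for pallet storage:

use codec::{Decode, Encode};
use std::collections::HashMap;

#[derive(Default)]
struct Attributes(HashMap<Vec<u8>, Vec<u8>>);

impl Attributes {
    fn set_attribute(&mut self, key: &[u8], value: &[u8]) {
        self.0.insert(key.to_vec(), value.to_vec());
    }

    fn attribute(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.0.get(key).cloned()
    }

    // Mirrors `set_typed_attribute`: encode both key and value to raw bytes.
    fn set_typed<K: Encode, V: Encode>(&mut self, key: &K, value: &V) {
        key.using_encoded(|k| value.using_encoded(|v| self.set_attribute(k, v)))
    }

    // Mirrors `typed_attribute`: look up by the encoded key, then decode the value.
    fn typed<K: Encode, V: Decode>(&self, key: &K) -> Option<V> {
        key.using_encoded(|d| self.attribute(d))
            .and_then(|v| V::decode(&mut &v[..]).ok())
    }
}

fn main() {
    let mut attrs = Attributes::default();
    attrs.set_typed(&42u32, &"royalty".to_string());
    // Decoding with the wrong type yields `None` rather than garbage.
    assert_eq!(attrs.typed::<u32, String>(&42), Some("royalty".to_string()));
}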
-use sp_std::prelude::*; +use crate::{dispatch::Parameter, weights::Weight}; use codec::{Codec, Decode}; -use sp_runtime::traits::{Convert, Zero}; -use sp_runtime::{BoundToRuntimeAppPublic, ConsensusEngineId, Permill, RuntimeAppPublic}; +use sp_runtime::{ + traits::{Convert, Zero}, + BoundToRuntimeAppPublic, ConsensusEngineId, Permill, RuntimeAppPublic, +}; use sp_staking::SessionIndex; -use crate::dispatch::Parameter; -use crate::weights::Weight; +use sp_std::prelude::*; /// A trait for online node inspection in a session. /// @@ -54,12 +55,14 @@ pub trait ValidatorSetWithIdentification: ValidatorSet { pub trait FindAuthor { /// Find the author of a block based on the pre-runtime digests. fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator; + where + I: 'a + IntoIterator; } impl FindAuthor for () { fn find_author<'a, I>(_: I) -> Option - where I: 'a + IntoIterator + where + I: 'a + IntoIterator, { None } @@ -81,7 +84,9 @@ pub trait OneSessionHandler: BoundToRuntimeAppPublic { /// for the second session, therefore the first call to `on_new_session` /// should provide the same validator set. fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator, ValidatorId: 'a; + where + I: Iterator, + ValidatorId: 'a; /// Session set has changed; act appropriately. Note that this can be called /// before initialization of your module. @@ -92,11 +97,10 @@ pub trait OneSessionHandler: BoundToRuntimeAppPublic { /// /// The `validators` are the validators of the incoming session, and `queued_validators` /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator, ValidatorId: 'a; + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + ValidatorId: 'a; /// A notification for end of the session. /// diff --git a/substrate/frame/support/src/traits/voting.rs b/substrate/frame/support/src/traits/voting.rs index f5afbac12955541d9986da7a7fe7840b4c7214f8..62c6217ad59bc4f255c221da3e79d1ccea50d9fa 100644 --- a/substrate/frame/support/src/traits/voting.rs +++ b/substrate/frame/support/src/traits/voting.rs @@ -18,7 +18,7 @@ //! Traits and associated data structures concerned with voting, and moving between tokens and //! votes. -use sp_arithmetic::traits::{UniqueSaturatedInto, UniqueSaturatedFrom, SaturatedConversion}; +use sp_arithmetic::traits::{SaturatedConversion, UniqueSaturatedFrom, UniqueSaturatedInto}; /// A trait similar to `Convert` to convert values from `B` an abstract balance type /// into u64 and back from u128. (This conversion is used in election and other places where complex @@ -69,7 +69,6 @@ impl CurrencyToVote for U128CurrencyToVote { } } - /// A naive implementation of `CurrencyConvert` that simply saturates all conversions. /// /// # Warning @@ -77,7 +76,9 @@ impl CurrencyToVote for U128CurrencyToVote { /// This is designed to be used mostly for testing. Use with care, and think about the consequences. 
pub struct SaturatingCurrencyToVote; -impl + UniqueSaturatedFrom> CurrencyToVote for SaturatingCurrencyToVote { +impl + UniqueSaturatedFrom> CurrencyToVote + for SaturatingCurrencyToVote +{ fn to_vote(value: B, _: B) -> u64 { value.unique_saturated_into() } diff --git a/substrate/frame/support/src/weights.rs b/substrate/frame/support/src/weights.rs index 2b7cff8c6168cfca6d5ed1f69b38cfc29a4e8312..c0431534ed937a2850603e5b52ecff67b8b3b7d6 100644 --- a/substrate/frame/support/src/weights.rs +++ b/substrate/frame/support/src/weights.rs @@ -127,15 +127,20 @@ //! - Ubuntu 19.10 (GNU/Linux 5.3.0-18-generic x86_64) //! - rustc 1.42.0 (b8cedc004 2020-03-09) +use crate::dispatch::{DispatchError, DispatchErrorWithPostInfo, DispatchResultWithPostInfo}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_runtime::{RuntimeDebug, traits::SignedExtension}; -use sp_runtime::generic::{CheckedExtrinsic, UncheckedExtrinsic}; -use crate::dispatch::{DispatchErrorWithPostInfo, DispatchResultWithPostInfo, DispatchError}; -use sp_runtime::traits::SaturatedConversion; -use sp_arithmetic::{Perbill, traits::{BaseArithmetic, Saturating, Unsigned}}; +use serde::{Deserialize, Serialize}; use smallvec::{smallvec, SmallVec}; +use sp_arithmetic::{ + traits::{BaseArithmetic, Saturating, Unsigned}, + Perbill, +}; +use sp_runtime::{ + generic::{CheckedExtrinsic, UncheckedExtrinsic}, + traits::{SaturatedConversion, SignedExtension}, + RuntimeDebug, +}; /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; @@ -152,7 +157,7 @@ pub mod constants { pub const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000; pub const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000 pub const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000 - pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 + pub const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000 parameter_types! { /// Importing a block with 0 txs takes ~5 ms @@ -267,13 +272,17 @@ pub trait OneOrMany { } impl OneOrMany for DispatchClass { - type Iter = sp_std::iter::Once; - fn into_iter(self) -> Self::Iter { sp_std::iter::once(self) } + type Iter = sp_std::iter::Once; + fn into_iter(self) -> Self::Iter { + sp_std::iter::once(self) + } } impl<'a> OneOrMany for &'a [DispatchClass] { - type Iter = sp_std::iter::Cloned>; - fn into_iter(self) -> Self::Iter { self.iter().cloned() } + type Iter = sp_std::iter::Cloned>; + fn into_iter(self) -> Self::Iter { + self.iter().cloned() + } } /// Primitives related to priority management of Frame. 
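The weight constants above form a fixed decimal ladder: one second of reference compute is 10^12 weight units, and each named constant steps down by a factor of 1000. A quick sanity check of the arithmetic (the ~5 ms empty-block figure is the one quoted in the `parameter_types!` comment above; everything else here is illustrative):

type Weight = u64;

const WEIGHT_PER_SECOND: Weight = 1_000_000_000_000;
const WEIGHT_PER_MILLIS: Weight = WEIGHT_PER_SECOND / 1000; // 1_000_000_000
const WEIGHT_PER_MICROS: Weight = WEIGHT_PER_MILLIS / 1000; // 1_000_000
const WEIGHT_PER_NANOS: Weight = WEIGHT_PER_MICROS / 1000; // 1_000

fn main() {
    // ~5 ms for importing an empty block, expressed in weight units.
    let block_weight = 5 * WEIGHT_PER_MILLIS;
    assert_eq!(block_weight, 5_000_000_000);
    // Converting back: weight / WEIGHT_PER_NANOS gives nanoseconds of compute.
    assert_eq!(block_weight / WEIGHT_PER_NANOS, 5_000_000);
}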
@@ -365,43 +374,32 @@ pub fn extract_actual_weight(result: &DispatchResultWithPostInfo, info: &Dispatc match result { Ok(post_info) => &post_info, Err(err) => &err.post_info, - }.calc_actual_weight(info) + } + .calc_actual_weight(info) } impl From<(Option, Pays)> for PostDispatchInfo { fn from(post_weight_info: (Option, Pays)) -> Self { let (actual_weight, pays_fee) = post_weight_info; - Self { - actual_weight, - pays_fee, - } + Self { actual_weight, pays_fee } } } impl From for PostDispatchInfo { fn from(pays_fee: Pays) -> Self { - Self { - actual_weight: None, - pays_fee, - } + Self { actual_weight: None, pays_fee } } } impl From> for PostDispatchInfo { fn from(actual_weight: Option) -> Self { - Self { - actual_weight, - pays_fee: Default::default(), - } + Self { actual_weight, pays_fee: Default::default() } } } impl From<()> for PostDispatchInfo { fn from(_: ()) -> Self { - Self { - actual_weight: None, - pays_fee: Default::default(), - } + Self { actual_weight: None, pays_fee: Default::default() } } } @@ -434,8 +432,9 @@ pub trait WithPostDispatchInfo { fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; } -impl WithPostDispatchInfo for T where - T: Into +impl WithPostDispatchInfo for T +where + T: Into, { fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { DispatchErrorWithPostInfo { @@ -542,8 +541,9 @@ impl WeighData for FunctionOf { // `WeighData` as a closure #[allow(deprecated)] -impl WeighData for FunctionOf where - WD : Fn(Args) -> Weight +impl WeighData for FunctionOf +where + WD: Fn(Args) -> Weight, { fn weigh_data(&self, args: Args) -> Weight { (self.0)(args) @@ -560,8 +560,9 @@ impl ClassifyDispatch for FunctionOf // `ClassifyDispatch` as a raw value #[allow(deprecated)] -impl ClassifyDispatch for FunctionOf where - CD : Fn(Args) -> DispatchClass +impl ClassifyDispatch for FunctionOf +where + CD: Fn(Args) -> DispatchClass, { fn classify_dispatch(&self, args: Args) -> DispatchClass { (self.1)(args) @@ -578,8 +579,9 @@ impl PaysFee for FunctionOf { // `PaysFee` as a closure #[allow(deprecated)] -impl PaysFee for FunctionOf where - PF : Fn(Args) -> Pays +impl PaysFee for FunctionOf +where + PF: Fn(Args) -> Pays, { fn pays_fee(&self, args: Args) -> Pays { (self.2)(args) @@ -599,8 +601,7 @@ where } /// Implementation for checked extrinsic. -impl GetDispatchInfo - for CheckedExtrinsic +impl GetDispatchInfo for CheckedExtrinsic where Call: GetDispatchInfo, { @@ -614,11 +615,7 @@ where impl GetDispatchInfo for sp_runtime::testing::TestXt { fn get_dispatch_info(&self) -> DispatchInfo { // for testing: weight == size. - DispatchInfo { - weight: self.encode().len() as _, - pays_fee: Pays::Yes, - ..Default::default() - } + DispatchInfo { weight: self.encode().len() as _, pays_fee: Pays::Yes, ..Default::default() } } } @@ -690,32 +687,35 @@ pub trait WeightToFeePolynomial { /// This should not be overriden in most circumstances. Calculation is done in the /// `Balance` type and never overflows. All evaluation is saturating. fn calc(weight: &Weight) -> Self::Balance { - Self::polynomial().iter().fold(Self::Balance::saturated_from(0u32), |mut acc, args| { - let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); - - // The sum could get negative. Therefore we only sum with the accumulator. - // The Perbill Mul implementation is non overflowing. 
- let frac = args.coeff_frac * w; - let integer = args.coeff_integer.saturating_mul(w); - - if args.negative { - acc = acc.saturating_sub(frac); - acc = acc.saturating_sub(integer); - } else { - acc = acc.saturating_add(frac); - acc = acc.saturating_add(integer); - } + Self::polynomial() + .iter() + .fold(Self::Balance::saturated_from(0u32), |mut acc, args| { + let w = Self::Balance::saturated_from(*weight).saturating_pow(args.degree.into()); - acc - }) + // The sum could get negative. Therefore we only sum with the accumulator. + // The Perbill Mul implementation is non overflowing. + let frac = args.coeff_frac * w; + let integer = args.coeff_integer.saturating_mul(w); + + if args.negative { + acc = acc.saturating_sub(frac); + acc = acc.saturating_sub(integer); + } else { + acc = acc.saturating_add(frac); + acc = acc.saturating_add(integer); + } + + acc + }) } } /// Implementor of `WeightToFeePolynomial` that maps one unit of weight to one unit of fee. pub struct IdentityFee(sp_std::marker::PhantomData); -impl WeightToFeePolynomial for IdentityFee where - T: BaseArithmetic + From + Copy + Unsigned +impl WeightToFeePolynomial for IdentityFee +where + T: BaseArithmetic + From + Copy + Unsigned, { type Balance = T; @@ -813,8 +813,8 @@ impl PerDispatchClass { #[cfg(test)] #[allow(dead_code)] mod tests { - use crate::{decl_module, parameter_types, traits::Get}; use super::*; + use crate::{decl_module, parameter_types, traits::Get}; pub trait Config: 'static { type Origin; @@ -925,24 +925,15 @@ mod tests { #[test] fn extract_actual_weight_works() { - let pre = DispatchInfo { - weight: 1000, - .. Default::default() - }; + let pre = DispatchInfo { weight: 1000, ..Default::default() }; assert_eq!(extract_actual_weight(&Ok(Some(7).into()), &pre), 7); assert_eq!(extract_actual_weight(&Ok(Some(1000).into()), &pre), 1000); - assert_eq!( - extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), - 9 - ); + assert_eq!(extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(9)), &pre), 9); } #[test] fn extract_actual_weight_caps_at_pre_weight() { - let pre = DispatchInfo { - weight: 1000, - .. Default::default() - }; + let pre = DispatchInfo { weight: 1000, ..Default::default() }; assert_eq!(extract_actual_weight(&Ok(Some(1250).into()), &pre), 1000); assert_eq!( extract_actual_weight(&Err(DispatchError::BadOrigin.with_weight(1300)), &pre), diff --git a/substrate/frame/support/test/src/pallet_version.rs b/substrate/frame/support/test/src/pallet_version.rs index aaa46c3ef2c6047401f927330fcf9d053c27b866..882c0b78b7338bfa24cdf1b6a37119183c2a93d5 100644 --- a/substrate/frame/support/test/src/pallet_version.rs +++ b/substrate/frame/support/test/src/pallet_version.rs @@ -25,8 +25,5 @@ fn ensure_that_current_pallet_version_is_correct() { patch: env!("CARGO_PKG_VERSION_PATCH").parse().unwrap(), }; - assert_eq!( - expected, - crate_to_pallet_version!(), - ) + assert_eq!(expected, crate_to_pallet_version!(),) } diff --git a/substrate/frame/support/test/tests/construct_runtime.rs b/substrate/frame/support/test/tests/construct_runtime.rs index dde7f6d53f8ed461d3570bac80f95efe199809e2..98669cb1add09bbdf3d91bf3a70f83d029b2cf55 100644 --- a/substrate/frame/support/test/tests/construct_runtime.rs +++ b/substrate/frame/support/test/tests/construct_runtime.rs @@ -19,12 +19,16 @@ //! * error declareed with decl_error works //! 
* integrity test is generated -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_runtime::{generic, traits::{BlakeTwo256, Verify}, DispatchError}; -use sp_core::{H256, sr25519}; -use sp_std::cell::RefCell; use frame_support::traits::PalletInfo as _; +use sp_core::{sr25519, H256}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + DispatchError, +}; +use sp_std::cell::RefCell; mod system; @@ -51,7 +55,7 @@ mod module1 { } #[derive(Clone, PartialEq, Eq, Debug, codec::Encode, codec::Decode)] - pub struct Origin(pub core::marker::PhantomData::<(T, I)>); + pub struct Origin(pub core::marker::PhantomData<(T, I)>); frame_support::decl_event! { pub enum Event where @@ -263,8 +267,8 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; mod origin_test { - use frame_support::traits::{Filter, OriginTrait}; use super::{module3, nested, system, Block, UncheckedExtrinsic}; + use frame_support::traits::{Filter, OriginTrait}; impl nested::module3::Config for RuntimeOriginTest {} impl module3::Config for RuntimeOriginTest {} @@ -556,10 +560,22 @@ fn get_call_names() { fn get_module_names() { use frame_support::dispatch::GetCallMetadata; let module_names = Call::get_module_names(); - assert_eq!([ - "System", "Module1_1", "Module2", "Module1_2", "NestedModule3", "Module3", - "Module1_4", "Module1_6", "Module1_7", "Module1_8", "Module1_9", - ], module_names); + assert_eq!( + [ + "System", + "Module1_1", + "Module2", + "Module1_2", + "NestedModule3", + "Module3", + "Module1_4", + "Module1_6", + "Module1_7", + "Module1_8", + "Module1_9", + ], + module_names + ); } #[test] @@ -583,28 +599,32 @@ fn test_metadata() { ModuleMetadata { name: DecodeDifferent::Encode("System"), storage: None, - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("noop"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("ExtrinsicSuccess"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - EventMetadata { - name: DecodeDifferent::Encode("ExtrinsicFailed"), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("noop"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - EventMetadata { - name: DecodeDifferent::Encode("Ignore"), - arguments: DecodeDifferent::Encode(&["BlockNumber"]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[ + EventMetadata { + name: DecodeDifferent::Encode("ExtrinsicSuccess"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("ExtrinsicFailed"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("Ignore"), + arguments: DecodeDifferent::Encode(&["BlockNumber"]), + documentation: DecodeDifferent::Encode(&[]), + }, + ] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 30, @@ -615,18 +635,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance1Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata 
{ + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { name: DecodeDifferent::Encode("fail"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 31, @@ -637,20 +659,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { name: DecodeDifferent::Encode("fail"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { name: DecodeDifferent::Encode("A"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - ]))), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 32, @@ -661,16 +683,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance2Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 33, @@ -681,20 +707,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { name: DecodeDifferent::Encode("fail"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { name: DecodeDifferent::Encode("A"), arguments: DecodeDifferent::Encode(&[]), documentation: DecodeDifferent::Encode(&[]), - }, - ]))), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: 
DecodeDifferent::Encode(FnEncode(|| &[])), index: 34, @@ -705,68 +731,68 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[ + FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_1"), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { name: DecodeDifferent::Encode("_data"), ty: DecodeDifferent::Encode("Compact"), - }, - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_2"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("Compact"), - }, - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_3"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("String"), - }, - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("operational"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[ - EventMetadata { + }]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_2"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("Compact"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_3"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("String"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_4"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("operational"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { name: DecodeDifferent::Encode("A"), arguments: DecodeDifferent::Encode(&[]), documentation: 
DecodeDifferent::Encode(&[]), - }, - ]))), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 35, @@ -786,11 +812,13 @@ fn test_metadata() { ModuleMetadata { name: DecodeDifferent::Encode("Module1_4"), storage: None, - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), event: None, constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), @@ -800,11 +828,13 @@ fn test_metadata() { name: DecodeDifferent::Encode("Module1_5"), storage: None, calls: None, - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 4, @@ -815,16 +845,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance6Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 1, @@ -835,16 +869,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance7Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: 
DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 2, @@ -855,16 +893,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance8Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 12, @@ -875,16 +917,20 @@ fn test_metadata() { prefix: DecodeDifferent::Encode("Instance9Module"), entries: DecodeDifferent::Encode(&[]), }))), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[FunctionMetadata { - name: DecodeDifferent::Encode("fail"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }]))), - event: Some(DecodeDifferent::Encode(FnEncode(|| &[EventMetadata { - name: DecodeDifferent::Encode("A"), - arguments: DecodeDifferent::Encode(&["AccountId"]), - documentation: DecodeDifferent::Encode(&[]), - }]))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("fail"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("A"), + arguments: DecodeDifferent::Encode(&["AccountId"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), constants: DecodeDifferent::Encode(FnEncode(|| &[])), errors: DecodeDifferent::Encode(FnEncode(|| &[])), index: 13, diff --git a/substrate/frame/support/test/tests/decl_storage.rs b/substrate/frame/support/test/tests/decl_storage.rs index 85c3d8f6756abfef9499b21ef1985fad3aca3e6a..666dda49935efb5185d5f3cdb8f1c9d7d524728d 100644 --- a/substrate/frame/support/test/tests/decl_storage.rs +++ b/substrate/frame/support/test/tests/decl_storage.rs @@ -28,8 +28,7 @@ mod tests { } pub trait Config: frame_support_test::Config { - type Origin2: codec::Codec + codec::EncodeLike + Default - + codec::MaxEncodedLen; + type Origin2: codec::Codec + codec::EncodeLike + Default + codec::MaxEncodedLen; } frame_support::decl_storage! 
{ @@ -104,329 +103,334 @@ mod tests { const EXPECTED_METADATA: StorageMetadata = StorageMetadata { prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("U32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[ " Hello, this is doc!" ]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("U32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Origin2")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIG"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32MYDEF(PhantomData::)) - ), - documentation: 
DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetU32WithBuilder"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetU32WithBuilder(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderSome"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderSome(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderNone"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderNone(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("MAPU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBMAPU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETMAPU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - 
StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DOUBLEMAP"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::DoubleMap { - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructDOUBLEMAP(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DOUBLEMAP2"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::DoubleMap { - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("[u8; 4]"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructDOUBLEMAP2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE1"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("(::std::option::Option,)")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE2"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("([[(u16, Option<()>); 32]; 12], u32)")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE3"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("[u32; 25]")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: 
DecodeDifferent::Encode("NMAP"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::NMap { - keys: DecodeDifferent::Encode(&["u32", "u16"]), - hashers: DecodeDifferent::Encode(&[StorageHasher::Blake2_128Concat, StorageHasher::Twox64Concat]), - value: DecodeDifferent::Encode("u8"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructNMAP(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("NMAP2"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::NMap { - keys: DecodeDifferent::Encode(&["u32"]), - hashers: DecodeDifferent::Encode(&[StorageHasher::Blake2_128Concat]), - value: DecodeDifferent::Encode("u8"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructNMAP(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ] - ), + entries: DecodeDifferent::Encode(&[ + StorageEntryMetadata { + name: DecodeDifferent::Encode("U32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[" Hello, this is doc!"]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("U32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Origin2")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIG"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + 
&__GetByteStructPUBGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetU32WithBuilder"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetU32WithBuilder(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetOptU32WithBuilderSome"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetOptU32WithBuilderSome(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetOptU32WithBuilderNone"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetOptU32WithBuilderNone(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("MAPU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBMAPU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + 
hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETMAPU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETMAPU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DOUBLEMAP"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructDOUBLEMAP( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DOUBLEMAP2"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("[u8; 4]"), + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructDOUBLEMAP2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE1"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode( + "(::std::option::Option,)", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + 
}, + StorageEntryMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE2"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode( + "([[(u16, Option<()>); 32]; 12], u32)", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE3"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("[u32; 25]")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("NMAP"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::NMap { + keys: DecodeDifferent::Encode(&["u32", "u16"]), + hashers: DecodeDifferent::Encode(&[ + StorageHasher::Blake2_128Concat, + StorageHasher::Twox64Concat, + ]), + value: DecodeDifferent::Encode("u8"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructNMAP( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("NMAP2"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::NMap { + keys: DecodeDifferent::Encode(&["u32"]), + hashers: DecodeDifferent::Encode(&[StorageHasher::Blake2_128Concat]), + value: DecodeDifferent::Encode("u8"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructNMAP( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), }; #[test] fn storage_info() { use frame_support::{ - StorageHasher, - traits::{StorageInfoTrait, StorageInfo}, pallet_prelude::*, + traits::{StorageInfo, StorageInfoTrait}, + StorageHasher, }; let prefix = |pallet_name, storage_name| { let mut res = [0u8; 32]; @@ -713,9 +717,9 @@ mod test2 { #[test] fn storage_info() { use frame_support::{ - StorageHasher, - traits::{StorageInfoTrait, StorageInfo}, pallet_prelude::*, + traits::{StorageInfo, StorageInfoTrait}, + StorageHasher, }; let prefix = |pallet_name, storage_name| { let mut res = [0u8; 32]; @@ -757,7 +761,6 @@ mod test2 { ], ); } - } #[cfg(test)] @@ -791,8 +794,8 @@ mod test3 { #[cfg(test)] #[allow(dead_code)] mod test_append_and_len { + use codec::{Decode, Encode}; use sp_io::TestExternalities; - use codec::{Encode, Decode}; pub trait Config: frame_support_test::Config {} diff --git a/substrate/frame/support/test/tests/derive_no_bound.rs b/substrate/frame/support/test/tests/derive_no_bound.rs index 3081a332b72c162d154de52a13919e9e84d3057f..457ece8b859010839185334eab605a7f4cf4b7c8 100644 --- a/substrate/frame/support/test/tests/derive_no_bound.rs +++ b/substrate/frame/support/test/tests/derive_no_bound.rs @@ -19,7 +19,7 @@ //! 
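A self-contained sketch of what the `prefix` closures in the `storage_info` tests above compute, assuming the `sp_io::hashing` imports used elsewhere in this patch; `storage_prefix` is my name, not a repo function. The first 16 bytes hash the pallet name, the next 16 the storage item name.

use sp_io::hashing::twox_128;

fn storage_prefix(pallet_name: &[u8], storage_name: &[u8]) -> [u8; 32] {
	let mut res = [0u8; 32];
	res[0..16].copy_from_slice(&twox_128(pallet_name));
	res[16..32].copy_from_slice(&twox_128(storage_name));
	res
}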
RuntimeDebugNoBound use frame_support::{ - DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DefaultNoBound, + CloneNoBound, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; #[derive(RuntimeDebugNoBound)] @@ -59,7 +59,7 @@ fn test_struct_named() { phantom: Default::default(), }; - let a_default: StructNamed:: = Default::default(); + let a_default: StructNamed = Default::default(); assert_eq!(a_default.a, 0); assert_eq!(a_default.b, 0); assert_eq!(a_default.c, 0); @@ -90,14 +90,9 @@ struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData< #[test] fn test_struct_unnamed() { - let a_1 = StructUnnamed::( - 1, - 2, - 3, - Default::default(), - ); + let a_1 = StructUnnamed::(1, 2, 3, Default::default()); - let a_default: StructUnnamed:: = Default::default(); + let a_default: StructUnnamed = Default::default(); assert_eq!(a_default.0, 0); assert_eq!(a_default.1, 0); assert_eq!(a_default.2, 0); @@ -108,17 +103,9 @@ fn test_struct_unnamed() { assert_eq!(a_2.1, 2); assert_eq!(a_2.2, 3); assert_eq!(a_2, a_1); - assert_eq!( - format!("{:?}", a_1), - String::from("StructUnnamed(1, 2, 3, PhantomData)") - ); + assert_eq!(format!("{:?}", a_1), String::from("StructUnnamed(1, 2, 3, PhantomData)")); - let b = StructUnnamed::( - 1, - 2, - 4, - Default::default(), - ); + let b = StructUnnamed::(1, 2, 4, Default::default()); assert!(b != a_1); } @@ -126,12 +113,7 @@ fn test_struct_unnamed() { #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum { VariantUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>), - VariantNamed { - a: u32, - b: u64, - c: T::C, - phantom: core::marker::PhantomData<(U, V)>, - }, + VariantNamed { a: u32, b: u64, c: T::C, phantom: core::marker::PhantomData<(U, V)> }, VariantUnit, VariantUnit2, } @@ -139,11 +121,7 @@ enum Enum { // enum that will have a named default. 
#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum2 { - VariantNamed { - a: u32, - b: u64, - c: T::C, - }, + VariantNamed { a: u32, b: u64, c: T::C }, VariantUnnamed(u32, u64, T::C), VariantUnit, VariantUnit2, @@ -153,18 +131,14 @@ enum Enum2 { #[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum3 { VariantUnit, - VariantNamed { - a: u32, - b: u64, - c: T::C, - }, + VariantNamed { a: u32, b: u64, c: T::C }, VariantUnnamed(u32, u64, T::C), VariantUnit2, } #[test] fn test_enum() { - type TestEnum = Enum::; + type TestEnum = Enum; let variant_0 = TestEnum::VariantUnnamed(1, 2, 3, Default::default()); let variant_0_bis = TestEnum::VariantUnnamed(1, 2, 4, Default::default()); let variant_1 = TestEnum::VariantNamed { a: 1, b: 2, c: 3, phantom: Default::default() }; @@ -179,14 +153,8 @@ fn test_enum() { TestEnum::VariantUnnamed(0, 0, 0, Default::default()) ); - assert_eq!( - Enum2::::default(), - Enum2::::VariantNamed { a: 0, b: 0, c: 0}, - ); - assert_eq!( - Enum3::::default(), - Enum3::::VariantUnit, - ); + assert_eq!(Enum2::::default(), Enum2::::VariantNamed { a: 0, b: 0, c: 0 },); + assert_eq!(Enum3::::default(), Enum3::::VariantUnit,); assert!(variant_0 != variant_0_bis); assert!(variant_1 != variant_1_bis); @@ -216,12 +184,6 @@ fn test_enum() { format!("{:?}", variant_1), String::from("Enum::VariantNamed { a: 1, b: 2, c: 3, phantom: PhantomData }"), ); - assert_eq!( - format!("{:?}", variant_2), - String::from("Enum::VariantUnit"), - ); - assert_eq!( - format!("{:?}", variant_3), - String::from("Enum::VariantUnit2"), - ); + assert_eq!(format!("{:?}", variant_2), String::from("Enum::VariantUnit"),); + assert_eq!(format!("{:?}", variant_3), String::from("Enum::VariantUnit2"),); } diff --git a/substrate/frame/support/test/tests/final_keys.rs b/substrate/frame/support/test/tests/final_keys.rs index 9839a3d3b2d94be33f8bbb735ab01c1408ae0dcb..e89f961d893f5b167c86bec7ca787d440a37aa7d 100644 --- a/substrate/frame/support/test/tests/final_keys.rs +++ b/substrate/frame/support/test/tests/final_keys.rs @@ -15,10 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use frame_support::storage::unhashed; use codec::Encode; -use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedMap}; -use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; +use frame_support::{ + storage::unhashed, StorageDoubleMap, StorageMap, StoragePrefixedMap, StorageValue, +}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, +}; mod no_instance { pub trait Config: frame_support_test::Config {} @@ -27,7 +31,7 @@ mod no_instance { pub struct Module for enum Call where origin: T::Origin, system=frame_support_test {} } - frame_support::decl_storage!{ + frame_support::decl_storage! { trait Store for Module as FinalKeysNone { pub Value config(value): u32; @@ -52,7 +56,7 @@ mod instance { for enum Call where origin: T::Origin, system=frame_support_test {} } - frame_support::decl_storage!{ + frame_support::decl_storage! 
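A hedged, std-only sketch of roughly what the `*NoBound` derives exercised above provide: a `Default` impl that places no bound on the type parameter `T`, where plain `#[derive(Default)]` would demand `T: Default` even though `T` only sits inside `PhantomData`. `StructNamedSketch` is illustrative; for enums, the `Enum2`/`Enum3` assertions above suggest the derive defaults to the first variant.

use core::marker::PhantomData;

#[allow(dead_code)]
struct StructNamedSketch<T> {
	a: u32,
	b: u64,
	phantom: PhantomData<T>,
}

// No `T: Default` bound: only the concrete field types need defaults,
// matching the zeroed fields the test assertions expect.
impl<T> Default for StructNamedSketch<T> {
	fn default() -> Self {
		Self { a: 0, b: 0, phantom: PhantomData }
	}
}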
{ trait Store for Module, I: Instance = DefaultInstance> as FinalKeysSome { diff --git a/substrate/frame/support/test/tests/genesisconfig.rs b/substrate/frame/support/test/tests/genesisconfig.rs index a30b021d13e51fb4fcf139bb6be268cb36c945ff..d488e8bfbfaff14728ef2555f40c485a325680ca 100644 --- a/substrate/frame/support/test/tests/genesisconfig.rs +++ b/substrate/frame/support/test/tests/genesisconfig.rs @@ -40,7 +40,5 @@ impl Config for Test {} #[test] fn init_genesis_config() { - GenesisConfig:: { - t: Default::default(), - }; + GenesisConfig:: { t: Default::default() }; } diff --git a/substrate/frame/support/test/tests/instance.rs b/substrate/frame/support/test/tests/instance.rs index 7d18a8368edab2a54c12ad441de2ab65e88cfed4..65a2c11d0d13f9f1dd34758e2e4035727a29950a 100644 --- a/substrate/frame/support/test/tests/instance.rs +++ b/substrate/frame/support/test/tests/instance.rs @@ -15,20 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -#![recursion_limit="128"] +#![recursion_limit = "128"] -use codec::{Codec, EncodeLike, Encode, Decode}; -use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Verify}}; +use codec::{Codec, Decode, Encode, EncodeLike}; use frame_support::{ - Parameter, traits::Get, parameter_types, + inherent::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}, metadata::{ - DecodeDifferent, StorageMetadata, StorageEntryModifier, StorageEntryType, DefaultByteGetter, - StorageEntryMetadata, StorageHasher, + DecodeDifferent, DefaultByteGetter, StorageEntryMetadata, StorageEntryModifier, + StorageEntryType, StorageHasher, StorageMetadata, }, - StorageValue, StorageMap, StorageDoubleMap, - inherent::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}, + parameter_types, + traits::Get, + Parameter, StorageDoubleMap, StorageMap, StorageValue, +}; +use sp_core::{sr25519, H256}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + BuildStorage, }; -use sp_core::{H256, sr25519}; mod system; @@ -41,7 +46,10 @@ mod module1 { use super::*; use sp_std::ops::Add; - pub trait Config: system::Config where ::BlockNumber: From { + pub trait Config: system::Config + where + ::BlockNumber: From, + { type Event: From> + Into<::Event>; type Origin: From>; type SomeParameter: Get; @@ -101,15 +109,19 @@ mod module1 { } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I> where T::BlockNumber: From { + pub enum Origin, I> + where + T::BlockNumber: From, + { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - impl, I: Instance> ProvideInherent for Module where - T::BlockNumber: From + impl, I: Instance> ProvideInherent for Module + where + T::BlockNumber: From, { type Call = Call; type Error = MakeFatalError<()>; @@ -119,7 +131,10 @@ mod module1 { unimplemented!(); } - fn check_inherent(_: &Self::Call, _: &InherentData) -> std::result::Result<(), Self::Error> { + fn check_inherent( + _: &Self::Call, + _: &InherentData, + ) -> std::result::Result<(), Self::Error> { unimplemented!(); } @@ -135,7 +150,7 @@ mod module1 { mod module2 { use super::*; - pub trait Config: system::Config { + pub trait Config: system::Config { type Amount: Parameter + Default; type Event: From> + Into<::Event>; type Origin: From>; @@ -167,7 +182,7 @@ mod module2 { } #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug, Encode, Decode)] - pub enum Origin, I=DefaultInstance> { + pub enum 
Origin, I = DefaultInstance> { Members(u32), _Phantom(std::marker::PhantomData<(T, I)>), } @@ -183,7 +198,10 @@ mod module2 { unimplemented!(); } - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> std::result::Result<(), Self::Error> { + fn check_inherent( + _call: &Self::Call, + _data: &InherentData, + ) -> std::result::Result<(), Self::Error> { unimplemented!(); } @@ -198,7 +216,9 @@ mod module2 { mod module3 { use super::*; - pub trait Config: module2::Config + module2::Config + system::Config { + pub trait Config: + module2::Config + module2::Config + system::Config + { type Currency: Currency; type Currency2: Currency; } @@ -255,7 +275,7 @@ pub type BlockNumber = u64; pub type Index = u64; impl system::Config for Runtime { - type BaseCallFilter= frame_support::traits::AllowAll; + type BaseCallFilter = frame_support::traits::AllowAll; type Hash = H256; type Origin = Origin; type BlockNumber = BlockNumber; @@ -298,15 +318,9 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig{ - module_1_1: module1::GenesisConfig { - value: 3, - test: 2, - }, - module_1_2: module1::GenesisConfig { - value: 4, - test: 5, - }, + GenesisConfig { + module_1_1: module1::GenesisConfig { value: 3, test: 2 }, + module_1_2: module1::GenesisConfig { value: 4, test: 5 }, module_2: module2::GenesisConfig { value: 4, map: vec![(0, 0)], @@ -319,14 +333,17 @@ fn new_test_ext() -> sp_io::TestExternalities { }, module_2_2: Default::default(), module_2_3: Default::default(), - }.build_storage().unwrap().into() + } + .build_storage() + .unwrap() + .into() } #[test] fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children_default: std::collections::HashMap::new() + children_default: std::collections::HashMap::new(), }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); @@ -359,7 +376,7 @@ fn storage_with_instance_basic_operation() { assert_eq!(Value::get(), 1); assert_eq!(Value::take(), 1); assert_eq!(Value::get(), 0); - Value::mutate(|a| *a=2); + Value::mutate(|a| *a = 2); assert_eq!(Value::get(), 2); Value::kill(); assert_eq!(Value::exists(), false); @@ -372,7 +389,7 @@ fn storage_with_instance_basic_operation() { assert_eq!(Map::get(key), 1); assert_eq!(Map::take(key), 1); assert_eq!(Map::get(key), 0); - Map::mutate(key, |a| *a=2); + Map::mutate(key, |a| *a = 2); assert_eq!(Map::get(key), 2); Map::remove(key); assert_eq!(Map::contains_key(key), false); @@ -386,7 +403,7 @@ fn storage_with_instance_basic_operation() { assert_eq!(DoubleMap::get(&key1, &key2), 1); assert_eq!(DoubleMap::take(&key1, &key2), 1); assert_eq!(DoubleMap::get(&key1, &key2), 0); - DoubleMap::mutate(&key1, &key2, |a| *a=2); + DoubleMap::mutate(&key1, &key2, |a| *a = 2); assert_eq!(DoubleMap::get(&key1, &key2), 2); DoubleMap::remove(&key1, &key2); assert_eq!(DoubleMap::get(&key1, &key2), 0); @@ -395,60 +412,48 @@ fn storage_with_instance_basic_operation() { const EXPECTED_METADATA: StorageMetadata = StorageMetadata { prefix: DecodeDifferent::Encode("Instance2Module2"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("Value"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Amount")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructValue( - 
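A toy, std-only stand-in for the `StorageValue` operations this test drives (`get`/`put`/`take`/`mutate`/`kill`/`exists`); real FRAME storage writes to the backing trie, but the value semantics asserted above are the same. `ValueSketch` is illustrative only.

use std::cell::RefCell;

struct ValueSketch(RefCell<Option<u32>>);

impl ValueSketch {
	// `get` falls back to the default (0) when nothing is stored.
	fn get(&self) -> u32 {
		self.0.borrow().unwrap_or_default()
	}
	fn put(&self, v: u32) {
		*self.0.borrow_mut() = Some(v);
	}
	// `take` returns the current value and clears the slot.
	fn take(&self) -> u32 {
		self.0.borrow_mut().take().unwrap_or_default()
	}
	fn mutate(&self, f: impl FnOnce(&mut u32)) {
		let mut v = self.get();
		f(&mut v);
		self.put(v);
	}
	fn kill(&self) {
		*self.0.borrow_mut() = None;
	}
	fn exists(&self) -> bool {
		self.0.borrow().is_some()
	}
}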
std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), + entries: DecodeDifferent::Encode(&[ + StorageEntryMetadata { + name: DecodeDifferent::Encode("Value"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Amount")), + default: DecodeDifferent::Encode(DefaultByteGetter(&module2::__GetByteStructValue( + std::marker::PhantomData::<(Runtime, module2::Instance2)>, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("Map"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Identity, + key: DecodeDifferent::Encode("u64"), + value: DecodeDifferent::Encode("u64"), + unused: false, }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("Map"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Identity, - key: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode("u64"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructMap( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), + default: DecodeDifferent::Encode(DefaultByteGetter(&module2::__GetByteStructMap( + std::marker::PhantomData::<(Runtime, module2::Instance2)>, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DoubleMap"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Identity, + key2_hasher: StorageHasher::Identity, + key1: DecodeDifferent::Encode("u64"), + key2: DecodeDifferent::Encode("u64"), + value: DecodeDifferent::Encode("u64"), }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DoubleMap"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap { - hasher: StorageHasher::Identity, - key2_hasher: StorageHasher::Identity, - key1: DecodeDifferent::Encode("u64"), - key2: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode("u64"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructDoubleMap( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) + default: DecodeDifferent::Encode(DefaultByteGetter( + &module2::__GetByteStructDoubleMap( + std::marker::PhantomData::<(Runtime, module2::Instance2)>, ), - documentation: DecodeDifferent::Encode(&[]), - } - ] - ) + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), }; #[test] diff --git a/substrate/frame/support/test/tests/issue2219.rs b/substrate/frame/support/test/tests/issue2219.rs index 78a79055a389d491903fc469afb22741239b6407..dd73700cf5ca242de4dc660880c07c4a45effcad 100644 --- a/substrate/frame/support/test/tests/issue2219.rs +++ b/substrate/frame/support/test/tests/issue2219.rs @@ -15,22 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use frame_support::sp_runtime::generic; -use frame_support::sp_runtime::traits::{BlakeTwo256, Verify}; -use frame_support::codec::{Encode, Decode}; -use sp_core::{H256, sr25519}; -use serde::{Serialize, Deserialize}; +use frame_support::{ + codec::{Decode, Encode}, + sp_runtime::{ + generic, + traits::{BlakeTwo256, Verify}, + }, +}; +use serde::{Deserialize, Serialize}; +use sp_core::{sr25519, H256}; mod system; mod module { use super::*; - pub type Request = ( - ::AccountId, - Role, - ::BlockNumber, - ); + pub type Request = + (::AccountId, Role, ::BlockNumber); pub type Requests = Vec>; #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] @@ -89,14 +90,12 @@ mod module { #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] pub struct Data { - pub data: T::BlockNumber, + pub data: T::BlockNumber, } impl Default for Data { fn default() -> Self { - Self { - data: T::BlockNumber::default(), - } + Self { data: T::BlockNumber::default() } } } @@ -185,9 +184,6 @@ frame_support::construct_runtime!( #[test] fn create_genesis_config() { GenesisConfig { - module: module::GenesisConfig { - request_life_time: 0, - enable_storage_role: true, - } + module: module::GenesisConfig { request_life_time: 0, enable_storage_role: true }, }; } diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 59ebd2e71e599be285f483a6a9a7b36d70eab6ad..7385eeb6ad74fe458b64ae866e810c1c98ba60d4 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -16,48 +16,87 @@ // limitations under the License. use frame_support::{ - weights::{DispatchInfo, DispatchClass, Pays, GetDispatchInfo}, + dispatch::{Parameter, UnfilteredDispatchable}, + storage::unhashed, traits::{ - GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade, GetPalletVersion, OnGenesis, + GetCallName, GetPalletVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, }, - dispatch::{UnfilteredDispatchable, Parameter}, - storage::unhashed, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, +}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, }; use sp_runtime::DispatchError; -use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; pub struct SomeType1; -impl From for u64 { fn from(_t: SomeType1) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType1) -> Self { + 0u64 + } +} pub struct SomeType2; -impl From for u64 { fn from(_t: SomeType2) -> Self { 100u64 } } +impl From for u64 { + fn from(_t: SomeType2) -> Self { + 100u64 + } +} pub struct SomeType3; -impl From for u64 { fn from(_t: SomeType3) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType3) -> Self { + 0u64 + } +} pub struct SomeType4; -impl From for u64 { fn from(_t: SomeType4) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType4) -> Self { + 0u64 + } +} pub struct SomeType5; -impl From for u64 { fn from(_t: SomeType5) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType5) -> Self { + 0u64 + } +} pub struct SomeType6; -impl From for u64 { fn from(_t: SomeType6) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType6) -> Self { + 0u64 + } +} pub struct SomeType7; -impl From for u64 { fn from(_t: SomeType7) -> Self { 0u64 } } +impl From for u64 { + fn from(_t: SomeType7) -> Self { + 0u64 + } +} -pub trait SomeAssociation1 { type _1: Parameter + codec::MaxEncodedLen; } -impl SomeAssociation1 for u64 { type _1 = u64; } +pub trait SomeAssociation1 { + type _1: Parameter 
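The impl blocks above read `impl From for u64`; the elided generic arguments can be inferred from the `fn from(_t: SomeTypeN)` signatures. A self-contained restatement of the first two, with the inferred angle-bracketed arguments to be treated as assumptions:

pub struct SomeType1;
// Inferred: the source impl is `impl From<SomeType1> for u64`.
impl From<SomeType1> for u64 {
	fn from(_t: SomeType1) -> Self {
		0u64
	}
}

pub struct SomeType2;
// Inferred likewise from the `_t: SomeType2` parameter.
impl From<SomeType2> for u64 {
	fn from(_t: SomeType2) -> Self {
		100u64
	}
}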
+ codec::MaxEncodedLen; +} +impl SomeAssociation1 for u64 { + type _1 = u64; +} -pub trait SomeAssociation2 { type _2: Parameter + codec::MaxEncodedLen; } -impl SomeAssociation2 for u64 { type _2 = u64; } +pub trait SomeAssociation2 { + type _2: Parameter + codec::MaxEncodedLen; +} +impl SomeAssociation2 for u64 { + type _2 = u64; +} #[frame_support::pallet] pub mod pallet { use super::{ - SomeType1, SomeType2, SomeType3, SomeType4, SomeType5, SomeType6, SomeType7, - SomeAssociation1, SomeAssociation2, + SomeAssociation1, SomeAssociation2, SomeType1, SomeType2, SomeType3, SomeType4, SomeType5, + SomeType6, SomeType7, }; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -66,7 +105,8 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config - where ::AccountId: From + SomeAssociation1, + where + ::AccountId: From + SomeAssociation1, { /// Some comment /// Some comment @@ -88,14 +128,19 @@ pub mod pallet { #[pallet::extra_constants] impl Pallet - where T::AccountId: From + SomeAssociation1 + From, + where + T::AccountId: From + SomeAssociation1 + From, { /// Some doc /// Some doc - fn some_extra() -> T::AccountId { SomeType2.into() } + fn some_extra() -> T::AccountId { + SomeType2.into() + } /// Some doc - fn some_extra_extra() -> T::AccountId { SomeType1.into() } + fn some_extra_extra() -> T::AccountId { + SomeType1.into() + } } #[pallet::pallet] @@ -105,7 +150,8 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet - where T::AccountId: From + From + SomeAssociation1, + where + T::AccountId: From + From + SomeAssociation1, { fn on_initialize(_: BlockNumberFor) -> Weight { T::AccountId::from(SomeType1); // Test for where clause @@ -132,7 +178,8 @@ pub mod pallet { #[pallet::call] impl Pallet - where T::AccountId: From + From + SomeAssociation1 + where + T::AccountId: From + From + SomeAssociation1, { /// Doc comment put in metadata #[pallet::weight(Weight::from(*_foo))] @@ -165,9 +212,7 @@ pub mod pallet { // Test for DispatchResult return type #[pallet::weight(1)] - pub fn foo_no_post_info( - _origin: OriginFor, - ) -> DispatchResult { + pub fn foo_no_post_info(_origin: OriginFor) -> DispatchResult { Ok(()) } } @@ -181,7 +226,10 @@ pub mod pallet { #[pallet::event] #[pallet::metadata(BalanceOf = "Balance", u32 = "Other")] #[pallet::generate_deposit(fn deposit_event)] - pub enum Event where T::AccountId: SomeAssociation1 + From{ + pub enum Event + where + T::AccountId: SomeAssociation1 + From, + { /// doc comment put in metadata Proposed(::AccountId), /// doc @@ -191,8 +239,10 @@ pub mod pallet { } #[pallet::storage] - pub type ValueWhereClause where T::AccountId: SomeAssociation2 = - StorageValue<_, ::_2>; + pub type ValueWhereClause + where + T::AccountId: SomeAssociation2, + = StorageValue<_, ::_2>; #[pallet::storage] pub type Value = StorageValue; @@ -203,28 +253,32 @@ pub mod pallet { #[pallet::type_value] pub fn MyDefault() -> u16 - where T::AccountId: From + From + SomeAssociation1 + where + T::AccountId: From + From + SomeAssociation1, { T::AccountId::from(SomeType7); // Test where clause works 4u16 } #[pallet::storage] - pub type Map where T::AccountId: From = - StorageMap<_, Blake2_128Concat, u8, u16, ValueQuery, MyDefault>; + pub type Map + where + T::AccountId: From, + = StorageMap<_, Blake2_128Concat, u8, u16, ValueQuery, MyDefault>; #[pallet::storage] - pub type Map2 = StorageMap< - Hasher = Twox64Concat, Key = u16, Value = u32, MaxValues = ConstU32<3> - >; + pub type Map2 = + StorageMap>; #[pallet::storage] pub type 
DoubleMap = StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; #[pallet::storage] pub type DoubleMap2 = StorageDoubleMap< - Hasher1 = Twox64Concat, Key1 = u16, - Hasher2 = Blake2_128Concat, Key2 = u32, + Hasher1 = Twox64Concat, + Key1 = u16, + Hasher2 = Blake2_128Concat, + Key2 = u32, Value = u64, MaxValues = ConstU32<5>, >; @@ -255,26 +309,14 @@ pub mod pallet { #[cfg(feature = "conditional-storage")] #[pallet::storage] #[pallet::getter(fn conditional_double_map)] - pub type ConditionalDoubleMap = StorageDoubleMap< - _, - Blake2_128Concat, - u8, - Twox64Concat, - u16, - u32, - >; + pub type ConditionalDoubleMap = + StorageDoubleMap<_, Blake2_128Concat, u8, Twox64Concat, u16, u32>; #[cfg(feature = "conditional-storage")] #[pallet::storage] #[pallet::getter(fn conditional_nmap)] - pub type ConditionalNMap = StorageNMap< - _, - ( - storage::Key, - storage::Key, - ), - u32, - >; + pub type ConditionalNMap = + StorageNMap<_, (storage::Key, storage::Key), u32>; #[pallet::genesis_config] #[derive(Default)] @@ -284,7 +326,8 @@ pub mod pallet { #[pallet::genesis_build] impl GenesisBuild for GenesisConfig - where T::AccountId: From + SomeAssociation1 + From + where + T::AccountId: From + SomeAssociation1 + From, { fn build(&self) { T::AccountId::from(SomeType1); // Test for where clause @@ -298,17 +341,15 @@ pub mod pallet { #[pallet::validate_unsigned] impl ValidateUnsigned for Pallet - where T::AccountId: From + SomeAssociation1 + From + From + where + T::AccountId: From + SomeAssociation1 + From + From, { type Call = Call; - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call - ) -> TransactionValidity { + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { T::AccountId::from(SomeType1); // Test for where clause T::AccountId::from(SomeType5); // Test for where clause if matches!(call, Call::foo_transactional(_)) { - return Ok(ValidTransaction::default()); + return Ok(ValidTransaction::default()) } Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) } @@ -316,7 +357,8 @@ pub mod pallet { #[pallet::inherent] impl ProvideInherent for Pallet - where T::AccountId: From + SomeAssociation1 + From + From + where + T::AccountId: From + SomeAssociation1 + From + From, { type Call = Call; type Error = InherentError; @@ -369,13 +411,14 @@ pub mod pallet { // Test that a pallet with non generic event and generic genesis_config is correctly handled #[frame_support::pallet] pub mod pallet2 { - use super::{SomeType1, SomeAssociation1}; + use super::{SomeAssociation1, SomeType1}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::config] pub trait Config: frame_system::Config - where ::AccountId: From + SomeAssociation1, + where + ::AccountId: From + SomeAssociation1, { type Event: From + IsType<::Event>; } @@ -385,16 +428,13 @@ pub mod pallet2 { pub struct Pallet(_); #[pallet::hooks] - impl Hooks> for Pallet - where T::AccountId: From + SomeAssociation1, + impl Hooks> for Pallet where + T::AccountId: From + SomeAssociation1 { } #[pallet::call] - impl Pallet - where T::AccountId: From + SomeAssociation1, - { - } + impl Pallet where T::AccountId: From + SomeAssociation1 {} #[pallet::storage] pub type SomeValue = StorageValue<_, Vec>; @@ -407,24 +447,25 @@ pub mod pallet2 { #[pallet::genesis_config] pub struct GenesisConfig - where T::AccountId: From + SomeAssociation1, + where + T::AccountId: From + SomeAssociation1, { phantom: PhantomData, } impl Default for GenesisConfig - where 
T::AccountId: From + SomeAssociation1, + where + T::AccountId: From + SomeAssociation1, { fn default() -> Self { - GenesisConfig { - phantom: Default::default(), - } + GenesisConfig { phantom: Default::default() } } } #[pallet::genesis_build] impl GenesisBuild for GenesisConfig - where T::AccountId: From + SomeAssociation1, + where + T::AccountId: From + SomeAssociation1, { fn build(&self) {} } @@ -441,9 +482,9 @@ pub mod pallet3 { } frame_support::parameter_types!( - pub const MyGetParam: u32= 10; - pub const MyGetParam2: u32= 11; - pub const MyGetParam3: u32= 12; + pub const MyGetParam: u32 = 10; + pub const MyGetParam2: u32 = 11; + pub const MyGetParam3: u32 = 12; pub const BlockHashCount: u32 = 250; ); @@ -505,13 +546,20 @@ fn transactional_works() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - pallet::Call::::foo_transactional(0).dispatch_bypass_filter(None.into()) - .err().unwrap(); + pallet::Call::::foo_transactional(0) + .dispatch_bypass_filter(None.into()) + .err() + .unwrap(); assert!(frame_system::Pallet::::events().is_empty()); - pallet::Call::::foo_transactional(1).dispatch_bypass_filter(None.into()).unwrap(); + pallet::Call::::foo_transactional(1) + .dispatch_bypass_filter(None.into()) + .unwrap(); assert_eq!( - frame_system::Pallet::::events().iter().map(|e| &e.event).collect::>(), + frame_system::Pallet::::events() + .iter() + .map(|e| &e.event) + .collect::>(), vec![&Event::Example(pallet::Event::Something(0))], ); }) @@ -522,11 +570,7 @@ fn call_expand() { let call_foo = pallet::Call::::foo(3, 0); assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: 3, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -547,11 +591,7 @@ fn error_expand() { ); assert_eq!( DispatchError::from(pallet::Error::::InsufficientProposersBalance), - DispatchError::Module { - index: 1, - error: 0, - message: Some("InsufficientProposersBalance"), - }, + DispatchError::Module { index: 1, error: 0, message: Some("InsufficientProposersBalance") }, ); } @@ -568,13 +608,17 @@ fn inherent_expand() { traits::EnsureInherentsAreFirst, }; use sp_core::Hasher; - use sp_runtime::{traits::{BlakeTwo256, Header}, Digest}; + use sp_runtime::{ + traits::{BlakeTwo256, Header}, + Digest, + }; let inherents = InherentData::new().create_extrinsics(); - let expected = vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, - ]; + let expected = vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: None, + }]; assert_eq!(expected, inherents); let block = Block::new( @@ -586,8 +630,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 0)), signature: None }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(1, 0)), + signature: None, + }, ], ); @@ -602,8 +652,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(0, 0)), signature: None }, + UncheckedExtrinsic 
{ + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(0, 0)), + signature: None, + }, ], ); @@ -617,9 +673,10 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, - ], + vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional(0)), + signature: None, + }], ); let mut inherent = InherentData::new(); @@ -634,9 +691,10 @@ fn inherent_expand() { BlakeTwo256::hash(b"test"), Digest::default(), ), - vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: Some((1, (), ())) }, - ], + vec![UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: Some((1, (), ())), + }], ); let mut inherent = InherentData::new(); @@ -652,8 +710,14 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(1, 1)), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional(0)), + signature: None, + }, ], ); @@ -668,9 +732,18 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_transactional(0)), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(1, 1)), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_transactional(0)), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: None, + }, ], ); @@ -685,9 +758,18 @@ fn inherent_expand() { Digest::default(), ), vec![ - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 1)), signature: None }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo(1, 0)), signature: Some((1, (), ())) }, - UncheckedExtrinsic { function: Call::Example(pallet::Call::foo_no_post_info()), signature: None }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(1, 1)), + signature: None, + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo(1, 0)), + signature: Some((1, (), ())), + }, + UncheckedExtrinsic { + function: Call::Example(pallet::Call::foo_no_post_info()), + signature: None, + }, ], ); @@ -697,7 +779,8 @@ fn inherent_expand() { #[test] fn validate_unsigned_expand() { use frame_support::pallet_prelude::{ - InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, ValidateUnsigned, + InvalidTransaction, TransactionSource, TransactionValidityError, ValidTransaction, + ValidateUnsigned, }; let call = pallet::Call::::foo_no_post_info(); @@ -733,8 +816,7 @@ fn pallet_expand_deposit_event() { #[test] fn storage_expand() { - use frame_support::pallet_prelude::*; - use frame_support::storage::StoragePrefixedMap; + use frame_support::{pallet_prelude::*, storage::StoragePrefixedMap}; fn twox_64_concat(d: &[u8]) -> Vec { let mut v = twox_64(d).to_vec(); @@ -850,8 +932,8 @@ fn 
pallet_on_genesis() { #[test] fn metadata() { - use frame_metadata::*; use codec::{Decode, Encode}; + use frame_metadata::*; let expected_pallet_metadata = ModuleMetadata { index: 1, @@ -862,11 +944,9 @@ fn metadata() { StorageEntryMetadata { name: DecodeDifferent::Decoded("ValueWhereClause".to_string()), modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain( - DecodeDifferent::Decoded( - "::_2".to_string() - ), - ), + ty: StorageEntryType::Plain(DecodeDifferent::Decoded( + "::_2".to_string(), + )), default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, @@ -939,9 +1019,7 @@ fn metadata() { modifier: StorageEntryModifier::Optional, ty: StorageEntryType::NMap { keys: DecodeDifferent::Decoded(vec!["u8".to_string()]), - hashers: DecodeDifferent::Decoded(vec![ - StorageHasher::Blake2_128Concat, - ]), + hashers: DecodeDifferent::Decoded(vec![StorageHasher::Blake2_128Concat]), value: DecodeDifferent::Decoded("u32".to_string()), }, default: DecodeDifferent::Decoded(vec![0]), @@ -951,10 +1029,7 @@ fn metadata() { name: DecodeDifferent::Decoded("NMap2".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::NMap { - keys: DecodeDifferent::Decoded(vec![ - "u16".to_string(), - "u32".to_string(), - ]), + keys: DecodeDifferent::Decoded(vec!["u16".to_string(), "u32".to_string()]), hashers: DecodeDifferent::Decoded(vec![ StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat, @@ -964,14 +1039,16 @@ fn metadata() { default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, - #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { name: DecodeDifferent::Decoded("ConditionalValue".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Plain(DecodeDifferent::Decoded("u32".to_string())), default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, - #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { name: DecodeDifferent::Decoded("ConditionalMap".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { @@ -983,7 +1060,8 @@ fn metadata() { default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, - #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { name: DecodeDifferent::Decoded("ConditionalDoubleMap".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::DoubleMap { @@ -996,7 +1074,8 @@ fn metadata() { default: DecodeDifferent::Decoded(vec![0]), documentation: DecodeDifferent::Decoded(vec![]), }, - #[cfg(feature = "conditional-storage")] StorageEntryMetadata { + #[cfg(feature = "conditional-storage")] + StorageEntryMetadata { name: DecodeDifferent::Decoded("ConditionalNMap".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::NMap { @@ -1023,22 +1102,20 @@ fn metadata() { FunctionArgumentMetadata { name: DecodeDifferent::Decoded("_bar".to_string()), ty: DecodeDifferent::Decoded("u32".to_string()), - } + }, ]), documentation: DecodeDifferent::Decoded(vec![ - " Doc comment put in metadata".to_string(), + " Doc comment put in metadata".to_string() ]), }, FunctionMetadata { name: DecodeDifferent::Decoded("foo_transactional".to_string()), - arguments: DecodeDifferent::Decoded(vec![ - 
FunctionArgumentMetadata { - name: DecodeDifferent::Decoded("foo".to_string()), - ty: DecodeDifferent::Decoded("Compact".to_string()), - } - ]), + arguments: DecodeDifferent::Decoded(vec![FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + }]), documentation: DecodeDifferent::Decoded(vec![ - " Doc comment put in metadata".to_string(), + " Doc comment put in metadata".to_string() ]), }, FunctionMetadata { @@ -1050,7 +1127,9 @@ fn metadata() { event: Some(DecodeDifferent::Decoded(vec![ EventMetadata { name: DecodeDifferent::Decoded("Proposed".to_string()), - arguments: DecodeDifferent::Decoded(vec!["::AccountId".to_string()]), + arguments: DecodeDifferent::Decoded(vec![ + "::AccountId".to_string() + ]), documentation: DecodeDifferent::Decoded(vec![ " doc comment put in metadata".to_string() ]), @@ -1058,9 +1137,7 @@ fn metadata() { EventMetadata { name: DecodeDifferent::Decoded("Spending".to_string()), arguments: DecodeDifferent::Decoded(vec!["Balance".to_string()]), - documentation: DecodeDifferent::Decoded(vec![ - " doc".to_string() - ]), + documentation: DecodeDifferent::Decoded(vec![" doc".to_string()]), }, EventMetadata { name: DecodeDifferent::Decoded("Something".to_string()), @@ -1069,7 +1146,9 @@ fn metadata() { }, EventMetadata { name: DecodeDifferent::Decoded("SomethingElse".to_string()), - arguments: DecodeDifferent::Decoded(vec!["::_1".to_string()]), + arguments: DecodeDifferent::Decoded(vec![ + "::_1".to_string() + ]), documentation: DecodeDifferent::Decoded(vec![]), }, ])), @@ -1111,19 +1190,15 @@ fn metadata() { name: DecodeDifferent::Decoded("some_extra_extra".to_string()), ty: DecodeDifferent::Decoded("T::AccountId".to_string()), value: DecodeDifferent::Decoded(vec![0, 0, 0, 0, 0, 0, 0, 0]), - documentation: DecodeDifferent::Decoded(vec![ - " Some doc".to_string(), - ]), - }, - ]), - errors: DecodeDifferent::Decoded(vec![ - ErrorMetadata { - name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), - documentation: DecodeDifferent::Decoded(vec![ - " doc comment put into metadata".to_string(), - ]), + documentation: DecodeDifferent::Decoded(vec![" Some doc".to_string()]), }, ]), + errors: DecodeDifferent::Decoded(vec![ErrorMetadata { + name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put into metadata".to_string() + ]), + }]), }; let metadata = match Runtime::metadata().1 { @@ -1155,9 +1230,9 @@ fn test_pallet_info_access() { #[test] fn test_storage_info() { use frame_support::{ - StorageHasher, - traits::{StorageInfoTrait, StorageInfo}, pallet_prelude::*, + traits::{StorageInfo, StorageInfoTrait}, + StorageHasher, }; let prefix = |pallet_name, storage_name| { @@ -1278,14 +1353,12 @@ fn test_storage_info() { assert_eq!( Example2::storage_info(), - vec![ - StorageInfo { - pallet_name: b"Example2".to_vec(), - storage_name: b"SomeValue".to_vec(), - prefix: prefix(b"Example2", b"SomeValue").to_vec(), - max_values: Some(1), - max_size: None, - }, - ], + vec![StorageInfo { + pallet_name: b"Example2".to_vec(), + storage_name: b"SomeValue".to_vec(), + prefix: prefix(b"Example2", b"SomeValue").to_vec(), + max_values: Some(1), + max_size: None, + },], ); } diff --git a/substrate/frame/support/test/tests/pallet_compatibility.rs b/substrate/frame/support/test/tests/pallet_compatibility.rs index 3c055b9f45aef138e9bcc004cff767ab650eec55..35c991432acdd8c23d8c4e20357835d9494e7d0f 100644 --- 
a/substrate/frame/support/test/tests/pallet_compatibility.rs +++ b/substrate/frame/support/test/tests/pallet_compatibility.rs @@ -23,15 +23,19 @@ impl SomeAssociation for u64 { } mod pallet_old { + use super::SomeAssociation; use frame_support::{ - decl_storage, decl_error, decl_event, decl_module, weights::Weight, traits::Get, Parameter + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, }; use frame_system::ensure_root; - use super::SomeAssociation; pub trait Config: frame_system::Config { type SomeConst: Get; - type Balance: Parameter + codec::HasCompact + From + Into + Default + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + SomeAssociation; type Event: From> + Into<::Event>; } @@ -50,7 +54,10 @@ mod pallet_old { } decl_event!( - pub enum Event where Balance = ::Balance { + pub enum Event + where + Balance = ::Balance, + { /// Dummy event, just here so there's a generic type that's used. Dummy(Balance), } @@ -93,13 +100,17 @@ mod pallet_old { pub mod pallet { use super::SomeAssociation; use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use frame_system::ensure_root; + use frame_system::{ensure_root, pallet_prelude::*}; #[pallet::config] pub trait Config: frame_system::Config { - type Balance: Parameter + codec::HasCompact + From + Into + Default - + MaybeSerializeDeserialize + SomeAssociation; + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + + MaybeSerializeDeserialize + + SomeAssociation; #[pallet::constant] type SomeConst: Get; type Event: From> + IsType<::Event>; @@ -125,7 +136,7 @@ pub mod pallet { #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, - #[pallet::compact] new_value: T::Balance + #[pallet::compact] new_value: T::Balance, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -157,13 +168,22 @@ pub mod pallet { #[pallet::storage] type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; - #[pallet::type_value] pub fn OnFooEmpty() -> T::Balance { 3.into() } + #[pallet::type_value] + pub fn OnFooEmpty() -> T::Balance { + 3.into() + } #[pallet::storage] type Foo = StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; #[pallet::storage] type Double = StorageDoubleMap< - _, Blake2_128Concat, u32, Twox64Concat, u64, ::A, ValueQuery + _, + Blake2_128Concat, + u32, + Twox64Concat, + u64, + ::A, + ValueQuery, >; #[pallet::genesis_config] @@ -257,9 +277,7 @@ frame_support::construct_runtime!( #[cfg(test)] mod test { - use super::Runtime; - use super::pallet; - use super::pallet_old; + use super::{pallet, pallet_old, Runtime}; use codec::{Decode, Encode}; #[test] @@ -284,14 +302,16 @@ mod test { assert_eq!( pallet_old::Event::::decode( &mut &pallet::Event::::Dummy(10).encode()[..] - ).unwrap(), + ) + .unwrap(), pallet_old::Event::::Dummy(10), ); assert_eq!( pallet_old::Call::::decode( &mut &pallet::Call::::set_dummy(10).encode()[..] 
- ).unwrap(), + ) + .unwrap(), pallet_old::Call::::set_dummy(10), ); } diff --git a/substrate/frame/support/test/tests/pallet_compatibility_instance.rs b/substrate/frame/support/test/tests/pallet_compatibility_instance.rs index fd5d5fb7fdbbd677d3a4c0579e364ba8bab80635..2d92920b81d8e1501e3d35f0d833b0eebce41930 100644 --- a/substrate/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/substrate/frame/support/test/tests/pallet_compatibility_instance.rs @@ -17,7 +17,7 @@ mod pallet_old { use frame_support::{ - decl_storage, decl_error, decl_event, decl_module, weights::Weight, traits::Get, Parameter + decl_error, decl_event, decl_module, decl_storage, traits::Get, weights::Weight, Parameter, }; use frame_system::ensure_root; @@ -39,7 +39,10 @@ mod pallet_old { } decl_event!( - pub enum Event where Balance = >::Balance { + pub enum Event + where + Balance = >::Balance, + { /// Dummy event, just here so there's a generic type that's used. Dummy(Balance), } @@ -83,12 +86,15 @@ mod pallet_old { #[frame_support::pallet] pub mod pallet { use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - use frame_system::ensure_root; + use frame_system::{ensure_root, pallet_prelude::*}; #[pallet::config] pub trait Config: frame_system::Config { - type Balance: Parameter + codec::HasCompact + From + Into + Default + type Balance: Parameter + + codec::HasCompact + + From + + Into + + Default + MaybeSerializeDeserialize; #[pallet::constant] type SomeConst: Get; @@ -115,7 +121,7 @@ pub mod pallet { #[pallet::weight(>::into(new_value.clone()))] pub fn set_dummy( origin: OriginFor, - #[pallet::compact] new_value: T::Balance + #[pallet::compact] new_value: T::Balance, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; @@ -151,12 +157,14 @@ pub mod pallet { #[pallet::storage] type Foo, I: 'static = ()> = StorageValue<_, T::Balance, ValueQuery, OnFooEmpty>; - #[pallet::type_value] pub fn OnFooEmpty, I: 'static>() -> T::Balance { 3.into() } + #[pallet::type_value] + pub fn OnFooEmpty, I: 'static>() -> T::Balance { + 3.into() + } #[pallet::storage] - type Double = StorageDoubleMap< - _, Blake2_128Concat, u32, Twox64Concat, u64, u16, ValueQuery - >; + type Double = + StorageDoubleMap<_, Blake2_128Concat, u32, Twox64Concat, u64, u16, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig, I: 'static = ()> { @@ -272,9 +280,7 @@ frame_support::construct_runtime!( #[cfg(test)] mod test { - use super::Runtime; - use super::pallet; - use super::pallet_old; + use super::{pallet, pallet_old, Runtime}; use codec::{Decode, Encode}; #[test] @@ -288,11 +294,11 @@ mod test { _ => unreachable!(), }; for i in vec![1, 3, 5].into_iter() { - pretty_assertions::assert_eq!(modules[i].storage, modules[i+1].storage); - pretty_assertions::assert_eq!(modules[i].calls, modules[i+1].calls); - pretty_assertions::assert_eq!(modules[i].event, modules[i+1].event); - pretty_assertions::assert_eq!(modules[i].constants, modules[i+1].constants); - pretty_assertions::assert_eq!(modules[i].errors, modules[i+1].errors); + pretty_assertions::assert_eq!(modules[i].storage, modules[i + 1].storage); + pretty_assertions::assert_eq!(modules[i].calls, modules[i + 1].calls); + pretty_assertions::assert_eq!(modules[i].event, modules[i + 1].event); + pretty_assertions::assert_eq!(modules[i].constants, modules[i + 1].constants); + pretty_assertions::assert_eq!(modules[i].errors, modules[i + 1].errors); } } @@ -301,14 +307,16 @@ mod test { assert_eq!( pallet_old::Event::::decode( &mut 
&pallet::Event::::Dummy(10).encode()[..] - ).unwrap(), + ) + .unwrap(), pallet_old::Event::::Dummy(10), ); assert_eq!( pallet_old::Call::::decode( &mut &pallet::Call::::set_dummy(10).encode()[..] - ).unwrap(), + ) + .unwrap(), pallet_old::Call::::set_dummy(10), ); } diff --git a/substrate/frame/support/test/tests/pallet_instance.rs b/substrate/frame/support/test/tests/pallet_instance.rs index 11f9497b7bec24eb3dcbd3f3180d5d297b4cd7b9..3181f54f06a9bc0c9590dfff79f43afc568bc90e 100644 --- a/substrate/frame/support/test/tests/pallet_instance.rs +++ b/substrate/frame/support/test/tests/pallet_instance.rs @@ -16,21 +16,24 @@ // limitations under the License. use frame_support::{ - weights::{DispatchInfo, DispatchClass, Pays, GetDispatchInfo}, - traits::{ - GetCallName, GetPalletVersion, OnInitialize, OnFinalize, OnRuntimeUpgrade, OnGenesis, - }, dispatch::UnfilteredDispatchable, storage::unhashed, + traits::{ + GetCallName, GetPalletVersion, OnFinalize, OnGenesis, OnInitialize, OnRuntimeUpgrade, + }, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Pays}, +}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, }; use sp_runtime::DispatchError; -use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; #[frame_support::pallet] pub mod pallet { - use sp_std::any::TypeId; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; + use sp_std::any::TypeId; type BalanceOf = >::Balance; @@ -73,15 +76,17 @@ pub mod pallet { 31 } } - fn integrity_test() { - } + fn integrity_test() {} } #[pallet::call] impl, I: 'static> Pallet { /// Doc comment put in metadata #[pallet::weight(Weight::from(*_foo))] - pub fn foo(origin: OriginFor, #[pallet::compact] _foo: u32) -> DispatchResultWithPostInfo { + pub fn foo( + origin: OriginFor, + #[pallet::compact] _foo: u32, + ) -> DispatchResultWithPostInfo { let _ = origin; Self::deposit_event(Event::Something(3)); Ok(().into()) @@ -92,14 +97,13 @@ pub mod pallet { #[frame_support::transactional] pub fn foo_transactional( origin: OriginFor, - #[pallet::compact] _foo: u32 + #[pallet::compact] _foo: u32, ) -> DispatchResultWithPostInfo { let _ = origin; Ok(().into()) } } - #[pallet::error] pub enum Error { /// doc comment put into metadata @@ -140,14 +144,8 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn nmap2)] - pub type NMap2 = StorageNMap< - _, - ( - storage::Key, - storage::Key, - ), - u64, - >; + pub type NMap2 = + StorageNMap<_, (storage::Key, storage::Key), u64>; #[pallet::genesis_config] #[derive(Default)] @@ -156,7 +154,7 @@ pub mod pallet { } #[pallet::genesis_build] - impl, I:'static> GenesisBuild for GenesisConfig { + impl, I: 'static> GenesisBuild for GenesisConfig { fn build(&self) {} } @@ -169,7 +167,7 @@ pub mod pallet { type Call = Call; fn validate_unsigned( _source: TransactionSource, - _call: &Self::Call + _call: &Self::Call, ) -> TransactionValidity { Err(TransactionValidityError::Invalid(InvalidTransaction::Call)) } @@ -193,8 +191,7 @@ pub mod pallet { #[derive(codec::Encode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(codec::Decode))] - pub enum InherentError { - } + pub enum InherentError {} impl frame_support::inherent::IsFatalError for InherentError { fn is_fatal_error(&self) -> bool { @@ -232,9 +229,7 @@ pub mod pallet2 { impl, I: 'static> Default for GenesisConfig { fn default() -> Self { - GenesisConfig { - phantom: Default::default(), - } + GenesisConfig { phantom: Default::default() } } } @@ -245,7 +240,7 @@ pub mod pallet2 { } 
frame_support::parameter_types!( - pub const MyGetParam: u32= 10; + pub const MyGetParam: u32 = 10; pub const BlockHashCount: u32 = 250; ); @@ -276,12 +271,12 @@ impl frame_system::Config for Runtime { } impl pallet::Config for Runtime { type Event = Event; - type MyGetParam= MyGetParam; + type MyGetParam = MyGetParam; type Balance = u64; } impl pallet::Config for Runtime { type Event = Event; - type MyGetParam= MyGetParam; + type MyGetParam = MyGetParam; type Balance = u64; } impl pallet2::Config for Runtime { @@ -316,26 +311,15 @@ fn call_expand() { let call_foo = pallet::Call::::foo(3); assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: 3, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); - assert_eq!( - pallet::Call::::get_call_names(), - &["foo", "foo_transactional"], - ); + assert_eq!(pallet::Call::::get_call_names(), &["foo", "foo_transactional"],); let call_foo = pallet::Call::::foo(3); assert_eq!( call_foo.get_dispatch_info(), - DispatchInfo { - weight: 3, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - } + DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: Pays::Yes } ); assert_eq!(call_foo.get_call_name(), "foo"); assert_eq!( @@ -356,11 +340,7 @@ fn error_expand() { ); assert_eq!( DispatchError::from(pallet::Error::::InsufficientProposersBalance), - DispatchError::Module { - index: 1, - error: 0, - message: Some("InsufficientProposersBalance"), - }, + DispatchError::Module { index: 1, error: 0, message: Some("InsufficientProposersBalance") }, ); assert_eq!( @@ -368,16 +348,16 @@ fn error_expand() { String::from("InsufficientProposersBalance"), ); assert_eq!( - <&'static str>::from(pallet::Error::::InsufficientProposersBalance), + <&'static str>::from( + pallet::Error::::InsufficientProposersBalance + ), "InsufficientProposersBalance", ); assert_eq!( - DispatchError::from(pallet::Error::::InsufficientProposersBalance), - DispatchError::Module { - index: 2, - error: 0, - message: Some("InsufficientProposersBalance"), - }, + DispatchError::from( + pallet::Error::::InsufficientProposersBalance + ), + DispatchError::Module { index: 2, error: 0, message: Some("InsufficientProposersBalance") }, ); } @@ -400,7 +380,9 @@ fn pallet_expand_deposit_event() { TestExternalities::default().execute_with(|| { frame_system::Pallet::::set_block_number(1); - pallet::Call::::foo(3).dispatch_bypass_filter(None.into()).unwrap(); + pallet::Call::::foo(3) + .dispatch_bypass_filter(None.into()) + .unwrap(); assert_eq!( frame_system::Pallet::::events()[0].event, Event::Instance1Example(pallet::Event::Something(3)), @@ -410,8 +392,7 @@ fn pallet_expand_deposit_event() { #[test] fn storage_expand() { - use frame_support::pallet_prelude::*; - use frame_support::storage::StoragePrefixedMap; + use frame_support::{pallet_prelude::*, storage::StoragePrefixedMap}; fn twox_64_concat(d: &[u8]) -> Vec { let mut v = twox_64(d).to_vec(); @@ -585,8 +566,8 @@ fn pallet_on_genesis() { #[test] fn metadata() { - use frame_metadata::*; use codec::{Decode, Encode}; + use frame_metadata::*; let expected_pallet_metadata = ModuleMetadata { index: 1, @@ -656,9 +637,7 @@ fn metadata() { modifier: StorageEntryModifier::Optional, ty: StorageEntryType::NMap { keys: DecodeDifferent::Decoded(vec!["u8".to_string()]), - hashers: DecodeDifferent::Decoded(vec![ - StorageHasher::Blake2_128Concat, - ]), + hashers: 
DecodeDifferent::Decoded(vec![StorageHasher::Blake2_128Concat]), value: DecodeDifferent::Decoded("u32".to_string()), }, default: DecodeDifferent::Decoded(vec![0]), @@ -668,10 +647,7 @@ fn metadata() { name: DecodeDifferent::Decoded("NMap2".to_string()), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::NMap { - keys: DecodeDifferent::Decoded(vec![ - "u16".to_string(), - "u32".to_string(), - ]), + keys: DecodeDifferent::Decoded(vec!["u16".to_string(), "u32".to_string()]), hashers: DecodeDifferent::Decoded(vec![ StorageHasher::Twox64Concat, StorageHasher::Blake2_128Concat, @@ -686,33 +662,31 @@ fn metadata() { calls: Some(DecodeDifferent::Decoded(vec![ FunctionMetadata { name: DecodeDifferent::Decoded("foo".to_string()), - arguments: DecodeDifferent::Decoded(vec![ - FunctionArgumentMetadata { - name: DecodeDifferent::Decoded("_foo".to_string()), - ty: DecodeDifferent::Decoded("Compact".to_string()), - } - ]), + arguments: DecodeDifferent::Decoded(vec![FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + }]), documentation: DecodeDifferent::Decoded(vec![ - " Doc comment put in metadata".to_string(), + " Doc comment put in metadata".to_string() ]), }, FunctionMetadata { name: DecodeDifferent::Decoded("foo_transactional".to_string()), - arguments: DecodeDifferent::Decoded(vec![ - FunctionArgumentMetadata { - name: DecodeDifferent::Decoded("_foo".to_string()), - ty: DecodeDifferent::Decoded("Compact".to_string()), - } - ]), + arguments: DecodeDifferent::Decoded(vec![FunctionArgumentMetadata { + name: DecodeDifferent::Decoded("_foo".to_string()), + ty: DecodeDifferent::Decoded("Compact".to_string()), + }]), documentation: DecodeDifferent::Decoded(vec![ - " Doc comment put in metadata".to_string(), + " Doc comment put in metadata".to_string() ]), }, ])), event: Some(DecodeDifferent::Decoded(vec![ EventMetadata { name: DecodeDifferent::Decoded("Proposed".to_string()), - arguments: DecodeDifferent::Decoded(vec!["::AccountId".to_string()]), + arguments: DecodeDifferent::Decoded(vec![ + "::AccountId".to_string() + ]), documentation: DecodeDifferent::Decoded(vec![ " doc comment put in metadata".to_string() ]), @@ -720,9 +694,7 @@ fn metadata() { EventMetadata { name: DecodeDifferent::Decoded("Spending".to_string()), arguments: DecodeDifferent::Decoded(vec!["Balance".to_string()]), - documentation: DecodeDifferent::Decoded(vec![ - " doc".to_string() - ]), + documentation: DecodeDifferent::Decoded(vec![" doc".to_string()]), }, EventMetadata { name: DecodeDifferent::Decoded("Something".to_string()), @@ -730,26 +702,23 @@ fn metadata() { documentation: DecodeDifferent::Decoded(vec![]), }, ])), - constants: DecodeDifferent::Decoded(vec![ - ModuleConstantMetadata { - name: DecodeDifferent::Decoded("MyGetParam".to_string()), - ty: DecodeDifferent::Decoded("u32".to_string()), - value: DecodeDifferent::Decoded(vec![10, 0, 0, 0]), - documentation: DecodeDifferent::Decoded(vec![]), - }, - ]), - errors: DecodeDifferent::Decoded(vec![ - ErrorMetadata { - name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), - documentation: DecodeDifferent::Decoded(vec![ - " doc comment put into metadata".to_string(), - ]), - }, - ]), + constants: DecodeDifferent::Decoded(vec![ModuleConstantMetadata { + name: DecodeDifferent::Decoded("MyGetParam".to_string()), + ty: DecodeDifferent::Decoded("u32".to_string()), + value: DecodeDifferent::Decoded(vec![10, 0, 0, 0]), + documentation: DecodeDifferent::Decoded(vec![]), 
+ }]), + errors: DecodeDifferent::Decoded(vec![ErrorMetadata { + name: DecodeDifferent::Decoded("InsufficientProposersBalance".to_string()), + documentation: DecodeDifferent::Decoded(vec![ + " doc comment put into metadata".to_string() + ]), + }]), }; let mut expected_pallet_instance1_metadata = expected_pallet_metadata.clone(); - expected_pallet_instance1_metadata.name = DecodeDifferent::Decoded("Instance1Example".to_string()); + expected_pallet_instance1_metadata.name = + DecodeDifferent::Decoded("Instance1Example".to_string()); expected_pallet_instance1_metadata.index = 2; match expected_pallet_instance1_metadata.storage { Some(DecodeDifferent::Decoded(ref mut storage_meta)) => { @@ -758,7 +727,6 @@ fn metadata() { _ => unreachable!(), } - let metadata = match Runtime::metadata().1 { RuntimeMetadata::V13(metadata) => metadata, _ => panic!("metadata has been bump, test needs to be updated"), @@ -781,9 +749,15 @@ fn metadata() { fn test_pallet_info_access() { assert_eq!(::name(), "System"); assert_eq!(::name(), "Example"); - assert_eq!(::name(), "Instance1Example"); + assert_eq!( + ::name(), + "Instance1Example" + ); assert_eq!(::name(), "Example2"); - assert_eq!(::name(), "Instance1Example2"); + assert_eq!( + ::name(), + "Instance1Example2" + ); assert_eq!(::index(), 0); assert_eq!(::index(), 1); diff --git a/substrate/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/substrate/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index 8a6ee8b8f5045a538daebba5c30b595b0c5da16a..4bc3cfdcbf9b731e04261577f46991a80c0c5542 100644 --- a/substrate/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -6,5 +6,5 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is | ::: $WORKSPACE/frame/support/src/traits/hooks.rs | - | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { - | ------- required by this bound in `GenesisBuild` + | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | ------- required by this bound in `GenesisBuild` diff --git a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index 3812b433e20ca55e4f49d91b64e2c3002975295d..23651faa59d58729ea40ec18a9c90f8446763ce1 100644 --- a/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -5,9 +5,9 @@ error[E0107]: missing generics for trait `Hooks` | ^^^^^ expected 1 type argument | note: trait defined here, with 1 type parameter: `BlockNumber` - --> $DIR/hooks.rs:206:11 + --> $DIR/hooks.rs:212:11 | -206 | pub trait Hooks { +212 | pub trait Hooks { | ^^^^^ ----------- help: use angle brackets to add missing type argument | diff --git a/substrate/frame/support/test/tests/pallet_version.rs b/substrate/frame/support/test/tests/pallet_version.rs index ed0bf52a0346f8201fc9b388ec4d62b4e112bc17..5048f47f6752284b7ed49f04e55441bec5a64843 100644 --- a/substrate/frame/support/test/tests/pallet_version.rs +++ b/substrate/frame/support/test/tests/pallet_version.rs @@ -17,15 +17,22 @@ //! Tests related to the pallet version. 
-#![recursion_limit="128"]
+#![recursion_limit = "128"]
 
 use codec::{Decode, Encode};
-use sp_runtime::{generic, traits::{BlakeTwo256, Verify}, BuildStorage};
 use frame_support::{
-	traits::{PALLET_VERSION_STORAGE_KEY_POSTFIX, PalletVersion, OnRuntimeUpgrade, GetPalletVersion},
-	crate_to_pallet_version, weights::Weight,
+	crate_to_pallet_version,
+	traits::{
+		GetPalletVersion, OnRuntimeUpgrade, PalletVersion, PALLET_VERSION_STORAGE_KEY_POSTFIX,
+	},
+	weights::Weight,
+};
+use sp_core::{sr25519, H256};
+use sp_runtime::{
+	generic,
+	traits::{BlakeTwo256, Verify},
+	BuildStorage,
 };
-use sp_core::{H256, sr25519};
 
 /// A version that we will check for in the tests
 const SOME_TEST_VERSION: PalletVersion = PalletVersion { major: 3000, minor: 30, patch: 13 };
@@ -47,7 +54,7 @@ mod module1 {
 mod module2 {
 	use super::*;
 
-	pub trait Config: frame_system::Config {}
+	pub trait Config: frame_system::Config {}
 
 	frame_support::decl_module! {
 		pub struct Module, I: Instance=DefaultInstance> for enum Call where
@@ -82,8 +89,7 @@ mod pallet3 {
 	use frame_system::pallet_prelude::*;
 
 	#[pallet::config]
-	pub trait Config: frame_system::Config {
-	}
+	pub trait Config: frame_system::Config {}
 
 	#[pallet::pallet]
 	pub struct Pallet(_);
@@ -91,13 +97,12 @@
 	#[pallet::hooks]
 	impl Hooks> for Pallet {
 		fn on_runtime_upgrade() -> Weight {
-			return 3;
+			return 3
 		}
 	}
 
 	#[pallet::call]
-	impl Pallet {
-	}
+	impl Pallet {}
 }
 
 #[frame_support::pallet]
@@ -106,22 +111,20 @@ mod pallet4 {
 	use frame_system::pallet_prelude::*;
 
 	#[pallet::config]
-	pub trait Config: frame_system::Config {
-	}
+	pub trait Config: frame_system::Config {}
 
 	#[pallet::pallet]
-	pub struct Pallet(PhantomData<(T, I)>);
+	pub struct Pallet(PhantomData<(T, I)>);
 
 	#[pallet::hooks]
 	impl, I: 'static> Hooks> for Pallet {
 		fn on_runtime_upgrade() -> Weight {
-			return 3;
+			return 3
 		}
 	}
 
 	#[pallet::call]
-	impl, I: 'static> Pallet {
-	}
+	impl, I: 'static> Pallet {}
 }
 
 impl module1::Config for Runtime {}
@@ -210,8 +213,8 @@ fn get_pallet_version_storage_key_for_pallet(pallet: &str) -> [u8; 32] {
 fn check_pallet_version(pallet: &str) {
 	let key = get_pallet_version_storage_key_for_pallet(pallet);
 	let value = sp_io::storage::get(&key).expect("Pallet version exists");
-	let version = PalletVersion::decode(&mut &value[..])
-		.expect("Pallet version is encoded correctly");
+	let version =
+		PalletVersion::decode(&mut &value[..]).expect("Pallet version is encoded correctly");
 
 	assert_eq!(crate_to_pallet_version!(), version);
 }
diff --git a/substrate/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/substrate/frame/support/test/tests/pallet_with_name_trait_is_valid.rs
index 665bbc2b5c51378bd253b7eb104fe961af535ec4..867d952741011e0192b2dd7276f86c0cf5c8e3d1 100644
--- a/substrate/frame/support/test/tests/pallet_with_name_trait_is_valid.rs
+++ b/substrate/frame/support/test/tests/pallet_with_name_trait_is_valid.rs
@@ -28,7 +28,10 @@ frame_support::decl_storage! {
 }
 
 frame_support::decl_event!(
-	pub enum Event where B = ::Balance {
+	pub enum Event
+	where
+		B = ::Balance,
+	{
 		Dummy(B),
 	}
 );
diff --git a/substrate/frame/support/test/tests/storage_transaction.rs b/substrate/frame/support/test/tests/storage_transaction.rs
index b518c60e957c6462ad2dedf5ddb195a21dd057ef..4e97a87377b17467d10a84f96838493600d5079b 100644
--- a/substrate/frame/support/test/tests/storage_transaction.rs
+++ b/substrate/frame/support/test/tests/storage_transaction.rs
@@ -16,8 +16,10 @@
 // limitations under the License.
 
 use frame_support::{
-	assert_ok, assert_noop, transactional, StorageMap, StorageValue,
-	dispatch::{DispatchError, DispatchResult}, storage::{with_transaction, TransactionOutcome::*},
+	assert_noop, assert_ok,
+	dispatch::{DispatchError, DispatchResult},
+	storage::{with_transaction, TransactionOutcome::*},
+	transactional, StorageMap, StorageValue,
 };
 use sp_io::TestExternalities;
 use sp_std::result;
@@ -41,7 +43,7 @@ frame_support::decl_module! {
 	}
 }
 
-frame_support::decl_storage!{
+frame_support::decl_storage! {
 	trait Store for Module as StorageTransactions {
 		pub Value: u32;
 		pub Map: map hasher(twox_64_concat) String => u32;
@@ -62,7 +64,6 @@ impl Config for Runtime {}
 #[test]
 fn storage_transaction_basic_commit() {
 	TestExternalities::default().execute_with(|| {
-
 		assert_eq!(Value::get(), 0);
 		assert!(!Map::contains_key("val0"));
 
@@ -82,7 +83,6 @@ fn storage_transaction_basic_commit() {
 #[test]
 fn storage_transaction_basic_rollback() {
 	TestExternalities::default().execute_with(|| {
-
 		assert_eq!(Value::get(), 0);
 		assert_eq!(Map::get("val0"), 0);
 
diff --git a/substrate/frame/support/test/tests/system.rs b/substrate/frame/support/test/tests/system.rs
index c4d7cf01ae215ccf36db0fb60a0d57183f0b5e5a..a0947e72b194f8cc77f060a7cb4e910d87a4fad3 100644
--- a/substrate/frame/support/test/tests/system.rs
+++ b/substrate/frame/support/test/tests/system.rs
@@ -16,7 +16,9 @@
 // limitations under the License.
 
 use frame_support::{
-	codec::{Encode, Decode, EncodeLike}, traits::Get, weights::RuntimeDbWeight,
+	codec::{Decode, Encode, EncodeLike},
+	traits::Get,
+	weights::RuntimeDbWeight,
 };
 
 pub trait Config: 'static + Eq + Clone {
@@ -45,7 +47,10 @@ impl Module {
 }
 
 frame_support::decl_event!(
-	pub enum Event where BlockNumber = ::BlockNumber {
+	pub enum Event
+	where
+		BlockNumber = ::BlockNumber,
+	{
 		ExtrinsicSuccess,
 		ExtrinsicFailed,
 		Ignore(BlockNumber),
@@ -83,7 +88,8 @@ pub type Origin = RawOrigin<::AccountId>;
 
 #[allow(dead_code)]
 pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str>
-	where OuterOrigin: Into, OuterOrigin>>
+where
+	OuterOrigin: Into, OuterOrigin>>,
 {
 	o.into().map(|_| ()).map_err(|_| "bad origin: expected to be a root origin")
 }
diff --git a/substrate/frame/system/benches/bench.rs b/substrate/frame/system/benches/bench.rs
index 02ea48bdde03292c71db544268be10cfd2a13ad3..e3f60733a62377919438132a5a8435dae153ac96 100644
--- a/substrate/frame/system/benches/bench.rs
+++ b/substrate/frame/system/benches/bench.rs
@@ -15,11 +15,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-use criterion::{Criterion, criterion_group, criterion_main, black_box};
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use frame_support::{decl_event, decl_module};
 use frame_system as system;
-use frame_support::{decl_module, decl_event};
 use sp_core::H256;
-use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header};
+use sp_runtime::{
+	testing::Header,
+	traits::{BlakeTwo256, IdentityLookup},
+	Perbill,
+};
 
 mod module {
 	use super::*;
@@ -104,17 +108,18 @@ fn deposit_events(n: usize) {
 	let mut t = new_test_ext();
 	t.execute_with(|| {
 		for _ in 0..n {
-			module::Module::::deposit_event(
-				module::Event::Complex(vec![1, 2, 3], 2, 3, 899)
-			);
+			module::Module::::deposit_event(module::Event::Complex(
+				vec![1, 2, 3],
+				2,
+				3,
+				899,
+			));
 		}
 	});
 }
 
 fn sr_system_benchmark(c: &mut Criterion) {
-	c.bench_function("deposit 100 events", |b| {
-		b.iter(|| deposit_events(black_box(100)))
-	});
+	c.bench_function("deposit 100 events", |b| b.iter(|| deposit_events(black_box(100))));
 }
 
 criterion_group!(benches, sr_system_benchmark);
diff --git a/substrate/frame/system/benchmarking/src/lib.rs b/substrate/frame/system/benchmarking/src/lib.rs
index 7146bcd60645b66e0a9de770387a8398998f9c6f..4b25dcd06a6363c3e4b304a2e4431a4119998c03 100644
--- a/substrate/frame/system/benchmarking/src/lib.rs
+++ b/substrate/frame/system/benchmarking/src/lib.rs
@@ -20,17 +20,12 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use codec::Encode;
-use sp_std::vec;
-use sp_std::prelude::*;
-use sp_core::{ChangesTrieConfiguration, storage::well_known_keys};
+use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_support::{storage, traits::Get, weights::DispatchClass};
+use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin};
+use sp_core::{storage::well_known_keys, ChangesTrieConfiguration};
 use sp_runtime::traits::Hash;
-use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite};
-use frame_support::{
-	storage,
-	traits::Get,
-	weights::DispatchClass,
-};
-use frame_system::{Pallet as System, Call, RawOrigin, DigestItemOf};
+use sp_std::{prelude::*, vec};
 
 mod mock;
 
@@ -144,8 +139,4 @@ benchmarks! {
 	}
 }
 
-impl_benchmark_test_suite!(
-	Pallet,
-	crate::mock::new_test_ext(),
-	crate::mock::Test,
-);
+impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,);
diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs
index aa6c1358790a411e2fcfc2283741be3e4715249a..4f561f17c356408b956159c019e9fa5df25b0e00 100644
--- a/substrate/frame/system/src/extensions/check_genesis.rs
+++ b/substrate/frame/system/src/extensions/check_genesis.rs
@@ -15,8 +15,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use codec::{Encode, Decode};
 use crate::{Config, Pallet};
+use codec::{Decode, Encode};
 use sp_runtime::{
 	traits::{SignedExtension, Zero},
 	transaction_validity::TransactionValidityError,
diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs
index b3e4c4ecfda86dbf4a152094514a3b37ce816a44..6596939eb9d684c7b1d2f5021bc587b40dd7fe66 100644
--- a/substrate/frame/system/src/extensions/check_mortality.rs
+++ b/substrate/frame/system/src/extensions/check_mortality.rs
@@ -15,13 +15,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-use codec::{Encode, Decode};
-use crate::{Config, Pallet, BlockHash};
+use crate::{BlockHash, Config, Pallet};
+use codec::{Decode, Encode};
 use sp_runtime::{
 	generic::Era,
-	traits::{SignedExtension, DispatchInfoOf, SaturatedConversion},
+	traits::{DispatchInfoOf, SaturatedConversion, SignedExtension},
 	transaction_validity::{
-		ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity,
+		InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction,
 	},
 };
 
@@ -84,7 +84,7 @@ impl SignedExtension for CheckMortality {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{Test, new_test_ext, System, CALL};
+	use crate::mock::{new_test_ext, System, Test, CALL};
 	use frame_support::weights::{DispatchClass, DispatchInfo, Pays};
 	use sp_core::H256;
 
@@ -93,7 +93,10 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			// future
 			assert_eq!(
-				CheckMortality::::from(Era::mortal(4, 2)).additional_signed().err().unwrap(),
+				CheckMortality::::from(Era::mortal(4, 2))
+					.additional_signed()
+					.err()
+					.unwrap(),
 				InvalidTransaction::AncientBirthBlock.into(),
 			);
 
@@ -107,7 +110,8 @@ mod tests {
 	#[test]
 	fn signed_ext_check_era_should_change_longevity() {
 		new_test_ext().execute_with(|| {
-			let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes };
+			let normal =
+				DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes };
 			let len = 0_usize;
 			let ext = (
 				crate::CheckWeight::::new(),
diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs
index cb25c3c027889f5e6ee240d4c3bebaf6a72cb986..6eaa9f9e02a4b308cb3a5eb3a37c91e3fa010ad3 100644
--- a/substrate/frame/system/src/extensions/check_nonce.rs
+++ b/substrate/frame/system/src/extensions/check_nonce.rs
@@ -15,14 +15,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use codec::{Encode, Decode};
 use crate::Config;
+use codec::{Decode, Encode};
 use frame_support::weights::DispatchInfo;
 use sp_runtime::{
-	traits::{SignedExtension, DispatchInfoOf, Dispatchable, One},
+	traits::{DispatchInfoOf, Dispatchable, One, SignedExtension},
 	transaction_validity::{
-		ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity,
-		TransactionLongevity,
+		InvalidTransaction, TransactionLongevity, TransactionValidity, TransactionValidityError,
+		ValidTransaction,
 	},
 };
 use sp_std::vec;
@@ -53,8 +53,9 @@ impl sp_std::fmt::Debug for CheckNonce {
 	}
 }
 
-impl SignedExtension for CheckNonce where
-	T::Call: Dispatchable
+impl SignedExtension for CheckNonce
+where
+	T::Call: Dispatchable,
 {
 	type AccountId = T::AccountId;
 	type Call = T::Call;
@@ -62,7 +63,9 @@ impl SignedExtension for CheckNonce where
 	type Pre = ();
 	const IDENTIFIER: &'static str = "CheckNonce";
 
-	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) }
+	fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> {
+		Ok(())
+	}
 
 	fn pre_dispatch(
 		self,
@@ -73,13 +76,12 @@ impl SignedExtension for CheckNonce where
 	) -> Result<(), TransactionValidityError> {
 		let mut account = crate::Account::::get(who);
 		if self.0 != account.nonce {
-			return Err(
-				if self.0 < account.nonce {
-					InvalidTransaction::Stale
-				} else {
-					InvalidTransaction::Future
-				}.into()
-			)
+			return Err(if self.0 < account.nonce {
+				InvalidTransaction::Stale
+			} else {
+				InvalidTransaction::Future
+			}
+			.into())
 		}
 		account.nonce += T::Index::one();
 		crate::Account::::insert(who, account);
@@ -119,19 +121,22 @@ impl SignedExtension for CheckNonce where
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{Test, new_test_ext, CALL};
+	use crate::mock::{new_test_ext, Test, CALL};
 	use frame_support::{assert_noop, assert_ok};
 
 	#[test]
 	fn signed_ext_check_nonce_works() {
 		new_test_ext().execute_with(|| {
-			crate::Account::::insert(1, crate::AccountInfo {
-				nonce: 1,
-				consumers: 0,
-				providers: 0,
-				sufficients: 0,
-				data: 0,
-			});
+			crate::Account::::insert(
+				1,
+				crate::AccountInfo {
+					nonce: 1,
+					consumers: 0,
+					providers: 0,
+					sufficients: 0,
+					data: 0,
+				},
+			);
 			let info = DispatchInfo::default();
 			let len = 0_usize;
 			// stale
diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs
index e41ce1725a5492b4c1c7ebe7f0e96536838d9686..7f5629fefa924c0a8f8146d1e57f8b95e82b1a3f 100644
--- a/substrate/frame/system/src/extensions/check_spec_version.rs
+++ b/substrate/frame/system/src/extensions/check_spec_version.rs
@@ -16,11 +16,8 @@
 // limitations under the License.
 
 use crate::{Config, Pallet};
-use codec::{Encode, Decode};
-use sp_runtime::{
-	traits::SignedExtension,
-	transaction_validity::TransactionValidityError,
-};
+use codec::{Decode, Encode};
+use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError};
 
 /// Ensure the runtime version registered in the transaction is the same as at present.
 #[derive(Encode, Decode, Clone, Eq, PartialEq)]
diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs
index ad23dc7e9dd0556c8557f4527aea23ecad09c478..badf0292601b67277924d28ca6f1f97f7158e721 100644
--- a/substrate/frame/system/src/extensions/check_tx_version.rs
+++ b/substrate/frame/system/src/extensions/check_tx_version.rs
@@ -16,11 +16,8 @@
 // limitations under the License.
 
 use crate::{Config, Pallet};
-use codec::{Encode, Decode};
-use sp_runtime::{
-	traits::SignedExtension,
-	transaction_validity::TransactionValidityError,
-};
+use codec::{Decode, Encode};
+use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError};
 
 /// Ensure the transaction version registered in the transaction is the same as at present.
 #[derive(Encode, Decode, Clone, Eq, PartialEq)]
diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs
index e01c91317615d8ceefb599ceeb1fe8d2ed85faef..40be222c2f8745310c0999fa9ba0935a11728745 100644
--- a/substrate/frame/system/src/extensions/check_weight.rs
+++ b/substrate/frame/system/src/extensions/check_weight.rs
@@ -16,26 +16,27 @@
 // limitations under the License.
 
 use crate::{limits::BlockWeights, Config, Pallet};
-use codec::{Encode, Decode};
+use codec::{Decode, Encode};
+use frame_support::{
+	traits::Get,
+	weights::{priority::FrameTransactionPriority, DispatchClass, DispatchInfo, PostDispatchInfo},
+};
 use sp_runtime::{
-	traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf},
+	traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension},
 	transaction_validity::{
-		ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity,
-		TransactionPriority,
+		InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError,
+		ValidTransaction,
 	},
 	DispatchResult,
 };
-use frame_support::{
-	traits::Get,
-	weights::{PostDispatchInfo, DispatchInfo, DispatchClass, priority::FrameTransactionPriority},
-};
 
 /// Block resource (weight) limit check.
 #[derive(Encode, Decode, Clone, Eq, PartialEq, Default)]
 pub struct CheckWeight(sp_std::marker::PhantomData);
 
-impl CheckWeight where
-	T::Call: Dispatchable,
+impl CheckWeight
+where
+	T::Call: Dispatchable,
 {
 	/// Checks if the current extrinsic does not exceed the maximum weight a single extrinsic
 	/// with given `DispatchClass` can have.
@@ -44,9 +45,7 @@ impl CheckWeight where
 	) -> Result<(), TransactionValidityError> {
 		let max = T::BlockWeights::get().get(info.class).max_extrinsic;
 		match max {
-			Some(max) if info.weight > max => {
-				Err(InvalidTransaction::ExhaustsResources.into())
-			},
+			Some(max) if info.weight > max => Err(InvalidTransaction::ExhaustsResources.into()),
 			_ => Ok(()),
 		}
 	}
@@ -87,8 +86,7 @@ impl CheckWeight where
 	fn get_priority(info: &DispatchInfoOf) -> TransactionPriority {
 		match info.class {
 			// Normal transaction.
-			DispatchClass::Normal =>
-				FrameTransactionPriority::Normal(info.weight.into()).into(),
+			DispatchClass::Normal => FrameTransactionPriority::Normal(info.weight.into()).into(),
 			// Don't use up the whole priority space, to allow things like `tip` to be taken into
 			// account as well.
 			DispatchClass::Operational =>
@@ -122,10 +120,7 @@ impl CheckWeight where
 	/// Do the validate checks. This can be applied to both signed and unsigned.
 	///
 	/// It only checks that the block weight and length limit will not exceed.
-	pub fn do_validate(
-		info: &DispatchInfoOf,
-		len: usize,
-	) -> TransactionValidity {
+	pub fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity {
 		// ignore the next length. If they return `Ok`, then it is below the limit.
 		let _ = Self::check_block_length(info, len)?;
 		// during validation we skip block limit check. Since the `validate_transaction`
@@ -141,17 +136,20 @@ pub fn calculate_consumed_weight(
 	maximum_weight: BlockWeights,
 	mut all_weight: crate::ConsumedWeight,
 	info: &DispatchInfoOf,
-) -> Result where
-	Call: Dispatchable,
+) -> Result
+where
+	Call: Dispatchable,
 {
-	let extrinsic_weight = info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic);
+	let extrinsic_weight =
+		info.weight.saturating_add(maximum_weight.get(info.class).base_extrinsic);
 	let limit_per_class = maximum_weight.get(info.class);
 
 	// add the weight. If class is unlimited, use saturating add instead of checked one.
 	if limit_per_class.max_total.is_none() && limit_per_class.reserved.is_none() {
 		all_weight.add(extrinsic_weight, info.class)
 	} else {
-		all_weight.checked_add(extrinsic_weight, info.class)
+		all_weight
+			.checked_add(extrinsic_weight, info.class)
 			.map_err(|_| InvalidTransaction::ExhaustsResources)?;
 	}
 
@@ -159,9 +157,7 @@ pub fn calculate_consumed_weight(
 
 	// Check if we don't exceed per-class allowance
 	match limit_per_class.max_total {
-		Some(max) if per_class > max => {
-			return Err(InvalidTransaction::ExhaustsResources.into());
-		},
+		Some(max) if per_class > max => return Err(InvalidTransaction::ExhaustsResources.into()),
 		// There is no `max_total` limit (`None`),
 		// or we are below the limit.
 		_ => {},
@@ -172,9 +168,8 @@ pub fn calculate_consumed_weight(
 	if all_weight.total() > maximum_weight.max_block {
 		match limit_per_class.reserved {
 			// We are over the limit in reserved pool.
-			Some(reserved) if per_class > reserved => {
-				return Err(InvalidTransaction::ExhaustsResources.into());
-			}
+			Some(reserved) if per_class > reserved =>
+				return Err(InvalidTransaction::ExhaustsResources.into()),
 			// There is either no limit in reserved pool (`None`),
 			// or we are below the limit.
_ => {}, @@ -184,8 +179,9 @@ pub fn calculate_consumed_weight( Ok(all_weight) } -impl SignedExtension for CheckWeight where - T::Call: Dispatchable +impl SignedExtension for CheckWeight +where + T::Call: Dispatchable, { type AccountId = T::AccountId; type Call = T::Call; @@ -193,7 +189,9 @@ impl SignedExtension for CheckWeight where type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } fn pre_dispatch( self, @@ -278,17 +276,24 @@ impl sp_std::fmt::Debug for CheckWeight { #[cfg(test)] mod tests { use super::*; - use crate::{BlockWeight, AllExtrinsicsLen}; - use crate::mock::{Test, CALL, new_test_ext, System}; + use crate::{ + mock::{new_test_ext, System, Test, CALL}, + AllExtrinsicsLen, BlockWeight, + }; + use frame_support::{ + assert_err, assert_ok, + weights::{Pays, Weight}, + }; use sp_std::marker::PhantomData; - use frame_support::{assert_err, assert_ok, weights::{Weight, Pays}}; fn block_weights() -> crate::limits::BlockWeights { ::BlockWeights::get() } fn normal_weight_limit() -> Weight { - block_weights().get(DispatchClass::Normal).max_total + block_weights() + .get(DispatchClass::Normal) + .max_total .unwrap_or_else(|| block_weights().max_block) } @@ -334,7 +339,10 @@ mod tests { ..Default::default() }; let len = 0_usize; - assert_err!(CheckWeight::::do_validate(&max, len), InvalidTransaction::ExhaustsResources); + assert_err!( + CheckWeight::::do_validate(&max, len), + InvalidTransaction::ExhaustsResources + ); }); } @@ -342,16 +350,15 @@ mod tests { fn operational_extrinsic_limited_by_operational_space_limit() { new_test_ext().execute_with(|| { let weights = block_weights(); - let operational_limit = weights.get(DispatchClass::Operational).max_total + let operational_limit = weights + .get(DispatchClass::Operational) + .max_total .unwrap_or_else(|| weights.max_block); let base_weight = weights.get(DispatchClass::Normal).base_extrinsic; let weight = operational_limit - base_weight; - let okay = DispatchInfo { - weight, - class: DispatchClass::Operational, - ..Default::default() - }; + let okay = + DispatchInfo { weight, class: DispatchClass::Operational, ..Default::default() }; let max = DispatchInfo { weight: weight + 1, class: DispatchClass::Operational, @@ -366,7 +373,10 @@ mod tests { ..Default::default() }) ); - assert_err!(CheckWeight::::do_validate(&max, len), InvalidTransaction::ExhaustsResources); + assert_err!( + CheckWeight::::do_validate(&max, len), + InvalidTransaction::ExhaustsResources + ); }); } @@ -388,7 +398,11 @@ mod tests { // So normal extrinsic can be 758 weight (-5 for base extrinsic weight) // And Operational can be 256 to produce a full block (-5 for base) let max_normal = DispatchInfo { weight: 753, ..Default::default() }; - let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + let rest_operational = DispatchInfo { + weight: 251, + class: DispatchClass::Operational, + ..Default::default() + }; let len = 0_usize; @@ -407,7 +421,11 @@ mod tests { new_test_ext().execute_with(|| { // We switch the order of `full_block_with_normal_and_operational` let max_normal = DispatchInfo { weight: 753, ..Default::default() }; - let rest_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + let rest_operational = DispatchInfo { + weight: 251, + class: 
DispatchClass::Operational, + ..Default::default() + }; let len = 0_usize; @@ -425,17 +443,24 @@ mod tests { new_test_ext().execute_with(|| { // An on_initialize takes up the whole block! (Every time!) System::register_extra_weight_unchecked(Weight::max_value(), DispatchClass::Mandatory); - let dispatch_normal = DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; - let dispatch_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; + let dispatch_normal = + DispatchInfo { weight: 251, class: DispatchClass::Normal, ..Default::default() }; + let dispatch_operational = DispatchInfo { + weight: 251, + class: DispatchClass::Operational, + ..Default::default() + }; let len = 0_usize; - assert_err!( CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + assert_err!( + CheckWeight::::do_pre_dispatch(&dispatch_normal, len), InvalidTransaction::ExhaustsResources ); // Thank goodness we can still do an operational transaction to possibly save the blockchain. assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); // Not too much though - assert_err!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + assert_err!( + CheckWeight::::do_pre_dispatch(&dispatch_operational, len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -447,7 +472,11 @@ mod tests { fn signed_ext_check_weight_works_operational_tx() { new_test_ext().execute_with(|| { let normal = DispatchInfo { weight: 100, ..Default::default() }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; let len = 0_usize; let normal_limit = normal_weight_limit(); @@ -456,7 +485,8 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. - assert_err!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), + assert_err!( + CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), InvalidTransaction::ExhaustsResources ); // will fit. @@ -465,7 +495,8 @@ mod tests { // likewise for length limit. 
let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert_err!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), + assert_err!( + CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), InvalidTransaction::ExhaustsResources ); assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); @@ -475,8 +506,13 @@ mod tests { #[test] fn signed_ext_check_weight_works() { new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; let len = 0_usize; let priority = CheckWeight::(PhantomData) @@ -485,10 +521,8 @@ mod tests { .priority; assert_eq!(priority, 100); - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &op, len) - .unwrap() - .priority; + let priority = + CheckWeight::(PhantomData).validate(&1, CALL, &op, len).unwrap().priority; assert_eq!(priority, frame_support::weights::priority::LIMIT + 100); }) } @@ -501,7 +535,11 @@ mod tests { let reset_check_weight = |tx, s, f| { AllExtrinsicsLen::::put(0); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } + if f { + assert!(r.is_err()) + } else { + assert!(r.is_ok()) + } }; reset_check_weight(&normal, normal_limit - 1, false); @@ -509,7 +547,8 @@ mod tests { reset_check_weight(&normal, normal_limit + 1, true); // Operational ones don't have this limit. - let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; + let op = + DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: Pays::Yes }; reset_check_weight(&op, normal_limit, false); reset_check_weight(&op, normal_limit + 100, false); reset_check_weight(&op, 1024, false); @@ -517,21 +556,16 @@ mod tests { }) } - #[test] fn signed_ext_check_weight_works_normal_tx() { new_test_ext().execute_with(|| { let normal_limit = normal_weight_limit(); let small = DispatchInfo { weight: 100, ..Default::default() }; let base_extrinsic = block_weights().get(DispatchClass::Normal).base_extrinsic; - let medium = DispatchInfo { - weight: normal_limit - base_extrinsic, - ..Default::default() - }; - let big = DispatchInfo { - weight: normal_limit - base_extrinsic + 1, - ..Default::default() - }; + let medium = + DispatchInfo { weight: normal_limit - base_extrinsic, ..Default::default() }; + let big = + DispatchInfo { weight: normal_limit - base_extrinsic + 1, ..Default::default() }; let len = 0_usize; let reset_check_weight = |i, f, s| { @@ -539,7 +573,11 @@ mod tests { current_weight.set(s, DispatchClass::Normal) }); let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } + if f { + assert!(r.is_err()) + } else { + assert!(r.is_ok()) + } }; reset_check_weight(&small, false, 0); @@ -553,10 +591,8 @@ mod tests { new_test_ext().execute_with(|| { // This is half of the max block weight let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(128), - pays_fee: Default::default(), - }; + let post_info = + PostDispatchInfo { actual_weight: Some(128), pays_fee: Default::default() }; let len = 0_usize; let base_extrinsic = 
block_weights().get(DispatchClass::Normal).base_extrinsic; @@ -569,11 +605,8 @@ mod tests { let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!(BlockWeight::::get().total(), info.weight + 256); - assert_ok!( CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(()))); - assert_eq!( - BlockWeight::::get().total(), - post_info.actual_weight.unwrap() + 256, - ); + assert_ok!(CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(()))); + assert_eq!(BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256,); }) } @@ -581,10 +614,8 @@ mod tests { fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { new_test_ext().execute_with(|| { let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { - actual_weight: Some(700), - pays_fee: Default::default(), - }; + let post_info = + PostDispatchInfo { actual_weight: Some(700), pays_fee: Default::default() }; let len = 0_usize; BlockWeight::::mutate(|current_weight| { @@ -614,10 +645,7 @@ mod tests { let len = 0_usize; // Initial weight from `weights.base_block` - assert_eq!( - System::block_weight().total(), - weights.base_block - ); + assert_eq!(System::block_weight().total(), weights.base_block); assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); assert_eq!( System::block_weight().total(), @@ -633,7 +661,11 @@ mod tests { // Max normal is 768 (75%) // Max mandatory is unlimited let max_normal = DispatchInfo { weight: 753, ..Default::default() }; - let mandatory = DispatchInfo { weight: 1019, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory = DispatchInfo { + weight: 1019, + class: DispatchClass::Mandatory, + ..Default::default() + }; let len = 0_usize; @@ -669,18 +701,24 @@ mod tests { assert_eq!(maximum_weight.max_block, all_weight.total()); // fits into reserved - let mandatory1 = DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory1 = + DispatchInfo { weight: 5, class: DispatchClass::Mandatory, ..Default::default() }; // does not fit into reserved and the block is full. 
- let mandatory2 = DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; + let mandatory2 = + DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; // when - assert_ok!( - calculate_consumed_weight::<::Call>( - maximum_weight.clone(), all_weight.clone(), &mandatory1 - ) - ); + assert_ok!(calculate_consumed_weight::<::Call>( + maximum_weight.clone(), + all_weight.clone(), + &mandatory1 + )); assert_err!( - calculate_consumed_weight::<::Call>( maximum_weight, all_weight, &mandatory2), + calculate_consumed_weight::<::Call>( + maximum_weight, + all_weight, + &mandatory2 + ), InvalidTransaction::ExhaustsResources ); } diff --git a/substrate/frame/system/src/extensions/mod.rs b/substrate/frame/system/src/extensions/mod.rs index 8b6c9b49e4d6b99c422e170eb592bc94ae5bafe3..0af9722e475d1e7b71770a7b893bb095ed038c75 100644 --- a/substrate/frame/system/src/extensions/mod.rs +++ b/substrate/frame/system/src/extensions/mod.rs @@ -21,4 +21,3 @@ pub mod check_nonce; pub mod check_spec_version; pub mod check_tx_version; pub mod check_weight; - diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 1c16514750d9a49a27099e1d0e713b02ce1390aa..68681ea5aca605b6bfd7f77bf554fed83a704d6b 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -66,57 +66,55 @@ #[cfg(feature = "std")] use serde::Serialize; -use sp_std::prelude::*; -#[cfg(any(feature = "std", test))] -use sp_std::map; -use sp_std::marker::PhantomData; -use sp_std::fmt::Debug; -use sp_version::RuntimeVersion; use sp_runtime::{ - RuntimeDebug, Perbill, DispatchError, Either, generic, + generic, traits::{ - self, CheckEqual, AtLeast32Bit, Zero, Lookup, LookupError, - SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, - MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, AtLeast32BitUnsigned, Saturating, BlockNumberProvider, + self, AtLeast32Bit, AtLeast32BitUnsigned, BadOrigin, BlockNumberProvider, Bounded, + CheckEqual, Dispatchable, Hash, Lookup, LookupError, MaybeDisplay, MaybeMallocSizeOf, + MaybeSerializeDeserialize, Member, One, Saturating, SimpleBitOps, StaticLookup, Zero, }, + DispatchError, Either, Perbill, RuntimeDebug, }; +#[cfg(any(feature = "std", test))] +use sp_std::map; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; +use sp_version::RuntimeVersion; -use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; +use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen}; use frame_support::{ - Parameter, storage, + dispatch::{DispatchResult, DispatchResultWithPostInfo}, + storage, traits::{ - SortedMembers, Get, PalletInfo, OnNewAccount, OnKilledAccount, HandleLifetime, - StoredMap, EnsureOrigin, OriginTrait, Filter, + EnsureOrigin, Filter, Get, HandleLifetime, OnKilledAccount, OnNewAccount, OriginTrait, + PalletInfo, SortedMembers, StoredMap, }, weights::{ - Weight, RuntimeDbWeight, DispatchInfo, DispatchClass, - extract_actual_weight, PerDispatchClass, + extract_actual_weight, DispatchClass, DispatchInfo, PerDispatchClass, RuntimeDbWeight, + Weight, }, - dispatch::{DispatchResultWithPostInfo, DispatchResult}, + Parameter, }; -use codec::{Encode, Decode, FullCodec, EncodeLike, MaxEncodedLen}; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; #[cfg(any(feature = "std", test))] use sp_io::TestExternalities; -pub mod offchain; pub mod limits; #[cfg(test)] pub(crate) mod 
mock; +pub mod offchain; mod extensions; -pub mod weights; -#[cfg(test)] -mod tests; #[cfg(feature = "std")] pub mod mocking; - +#[cfg(test)] +mod tests; +pub mod weights; pub use extensions::{ - check_mortality::CheckMortality, check_genesis::CheckGenesis, check_nonce::CheckNonce, + check_genesis::CheckGenesis, check_mortality::CheckMortality, check_nonce::CheckNonce, check_spec_version::CheckSpecVersion, check_tx_version::CheckTxVersion, check_weight::CheckWeight, }; @@ -154,7 +152,7 @@ impl SetCode for () { #[frame_support::pallet] pub mod pallet { - use crate::{*, pallet_prelude::*, self as frame_system}; + use crate::{self as frame_system, pallet_prelude::*, *}; use frame_support::pallet_prelude::*; /// System configuration trait. Implemented by runtime. @@ -174,39 +172,69 @@ pub mod pallet { type BlockLength: Get; /// The `Origin` type used by dispatchable calls. - type Origin: - Into, Self::Origin>> + type Origin: Into, Self::Origin>> + From> + Clone - + OriginTrait; + + OriginTrait; /// The aggregated `Call` type. type Call: Dispatchable + Debug; /// Account index (aka nonce) type. This stores the number of previous transactions associated /// with a sender account. - type Index: - Parameter + Member + MaybeSerializeDeserialize + Debug + Default + MaybeDisplay + AtLeast32Bit + type Index: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + Default + + MaybeDisplay + + AtLeast32Bit + Copy; /// The block number type used by the runtime. - type BlockNumber: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + - AtLeast32BitUnsigned + Default + Bounded + Copy + sp_std::hash::Hash + - sp_std::str::FromStr + MaybeMallocSizeOf + MaxEncodedLen; + type BlockNumber: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + AtLeast32BitUnsigned + + Default + + Bounded + + Copy + + sp_std::hash::Hash + + sp_std::str::FromStr + + MaybeMallocSizeOf + + MaxEncodedLen; /// The output of the `Hashing` function. - type Hash: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord - + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> - + MaybeMallocSizeOf + MaxEncodedLen; + type Hash: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + SimpleBitOps + + Ord + + Default + + Copy + + CheckEqual + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf + + MaxEncodedLen; /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). - type Hashing: Hash; + type Hashing: Hash; /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord - + Default + MaxEncodedLen; + type AccountId: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + Ord + + Default + + MaxEncodedLen; /// Converting trait to take a source type and convert to `AccountId`. /// @@ -214,16 +242,17 @@ pub mod pallet { /// It's perfectly reasonable for this to be an identity conversion (with the source type being /// `AccountId`), but other pallets (e.g. Indices pallet) may provide more functional/efficient /// alternatives. - type Lookup: StaticLookup; + type Lookup: StaticLookup; /// The block header. - type Header: Parameter + traits::Header< - Number=Self::BlockNumber, - Hash=Self::Hash, - >; + type Header: Parameter + traits::Header; /// The aggregated event type of the runtime. 
- type Event: Parameter + Member + From> + Debug + IsType<::Event>; + type Event: Parameter + + Member + + From> + + Debug + + IsType<::Event>; /// Maximum number of block number to block hash mappings to keep (oldest pruned first). #[pallet::constant] @@ -288,9 +317,7 @@ pub mod pallet { } fn integrity_test() { - T::BlockWeights::get() - .validate() - .expect("The weights are invalid."); + T::BlockWeights::get().validate().expect("The weights are invalid."); } } @@ -413,7 +440,10 @@ pub mod pallet { T::SystemWeightInfo::set_storage(items.len() as u32), DispatchClass::Operational, ))] - pub fn set_storage(origin: OriginFor, items: Vec) -> DispatchResultWithPostInfo { + pub fn set_storage( + origin: OriginFor, + items: Vec, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; for i in &items { storage::unhashed::put_raw(&i.0, &i.1); @@ -473,7 +503,10 @@ pub mod pallet { /// - 1 event. /// # #[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))] - pub fn remark_with_event(origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { + pub fn remark_with_event( + origin: OriginFor, + remark: Vec, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let hash = T::Hashing::hash(&remark[..]); Self::deposit_event(Event::Remarked(who, hash)); @@ -580,8 +613,7 @@ pub mod pallet { /// Events deposited for the current block. #[pallet::storage] #[pallet::getter(fn events)] - pub type Events = - StorageValue<_, Vec>, ValueQuery>; + pub type Events = StorageValue<_, Vec>, ValueQuery>; /// The number of events in the `Events` list. #[pallet::storage] @@ -630,10 +662,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { - changes_trie_config: Default::default(), - code: Default::default(), - } + Self { changes_trie_config: Default::default(), code: Default::default() } } } @@ -649,7 +678,10 @@ pub mod pallet { sp_io::storage::set(well_known_keys::CODE, &self.code); sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); if let Some(ref changes_trie_config) = self.changes_trie_config { - sp_io::storage::set(well_known_keys::CHANGES_TRIE_CONFIG, &changes_trie_config.encode()); + sp_io::storage::set( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ); } } } @@ -661,17 +693,25 @@ pub mod migrations { #[allow(dead_code)] /// Migrate from unique `u8` reference counting to triple `u32` reference counting. pub fn migrate_all() -> frame_support::weights::Weight { - Account::::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)| - Some(AccountInfo { nonce, consumers: rc as RefCount, providers: 1, sufficients: 0, data }) - ); + Account::::translate::<(T::Index, u8, T::AccountData), _>(|_key, (nonce, rc, data)| { + Some(AccountInfo { + nonce, + consumers: rc as RefCount, + providers: 1, + sufficients: 0, + data, + }) + }); T::BlockWeights::get().max_block } #[allow(dead_code)] /// Migrate from unique `u32` reference counting to triple `u32` reference counting. 
pub fn migrate_to_dual_ref_count() -> frame_support::weights::Weight { - Account::::translate::<(T::Index, RefCount, T::AccountData), _>(|_key, (nonce, consumers, data)| - Some(AccountInfo { nonce, consumers, providers: 1, sufficients: 0, data }) + Account::::translate::<(T::Index, RefCount, T::AccountData), _>( + |_key, (nonce, consumers, data)| { + Some(AccountInfo { nonce, consumers, providers: 1, sufficients: 0, data }) + }, ); T::BlockWeights::get().max_block } @@ -681,7 +721,7 @@ pub mod migrations { Account::::translate::<(T::Index, RefCount, RefCount, T::AccountData), _>( |_key, (nonce, consumers, providers, data)| { Some(AccountInfo { nonce, consumers, providers, sufficients: 0, data }) - } + }, ); T::BlockWeights::get().max_block } @@ -701,7 +741,7 @@ impl GenesisConfig { /// Kept in order not to break dependency. pub fn assimilate_storage( &self, - storage: &mut sp_runtime::Storage + storage: &mut sp_runtime::Storage, ) -> Result<(), String> { >::assimilate_storage(self, storage) } @@ -822,18 +862,14 @@ impl LastRuntimeUpgradeInfo { impl From for LastRuntimeUpgradeInfo { fn from(version: sp_version::RuntimeVersion) -> Self { - Self { - spec_version: version.spec_version.into(), - spec_name: version.spec_name, - } + Self { spec_version: version.spec_version.into(), spec_name: version.spec_name } } } pub struct EnsureRoot(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureRoot { +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureRoot +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -849,10 +885,9 @@ impl< } pub struct EnsureSigned(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId: Default, -> EnsureOrigin for EnsureSigned { +impl, O>> + From>, AccountId: Default> + EnsureOrigin for EnsureSigned +{ type Success = AccountId; fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -869,10 +904,11 @@ impl< pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< - O: Into, O>> + From>, - Who: SortedMembers, - AccountId: PartialEq + Clone + Ord + Default, -> EnsureOrigin for EnsureSignedBy { + O: Into, O>> + From>, + Who: SortedMembers, + AccountId: PartialEq + Clone + Ord + Default, + > EnsureOrigin for EnsureSignedBy +{ type Success = AccountId; fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -893,10 +929,9 @@ impl< } pub struct EnsureNone(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureNone { +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureNone +{ type Success = (); fn try_origin(o: O) -> Result { o.into().and_then(|o| match o { @@ -929,17 +964,16 @@ impl EnsureOrigin for EnsureNever { /// Origin check will pass if `L` or `R` origin check passes. `L` is tested first. 
pub struct EnsureOneOf(sp_std::marker::PhantomData<(AccountId, L, R)>); impl< - AccountId, - O: Into, O>> + From>, - L: EnsureOrigin, - R: EnsureOrigin, -> EnsureOrigin for EnsureOneOf { + AccountId, + O: Into, O>> + From>, + L: EnsureOrigin, + R: EnsureOrigin, + > EnsureOrigin for EnsureOneOf +{ type Success = Either; fn try_origin(o: O) -> Result { - L::try_origin(o).map_or_else( - |o| R::try_origin(o).map(|o| Either::Right(o)), - |o| Ok(Either::Left(o)), - ) + L::try_origin(o) + .map_or_else(|o| R::try_origin(o).map(|o| Either::Right(o)), |o| Ok(Either::Left(o))) } #[cfg(feature = "runtime-benchmarks")] @@ -951,7 +985,8 @@ impl< /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::Signed(t)) => Ok(t), @@ -961,7 +996,8 @@ pub fn ensure_signed(o: OuterOrigin) -> Result(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::Root) => Ok(()), @@ -971,7 +1007,8 @@ pub fn ensure_root(o: OuterOrigin) -> Result<(), BadOrig /// Ensure that the origin `o` represents an unsigned extrinsic. Returns `Ok` or an `Err` otherwise. pub fn ensure_none(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { match o.into() { Ok(RawOrigin::None) => Ok(()), @@ -1057,14 +1094,16 @@ impl Pallet { /// Increment the provider reference counter on an account. pub fn inc_providers(who: &T::AccountId) -> IncRefStatus { - Account::::mutate(who, |a| if a.providers == 0 && a.sufficients == 0 { - // Account is being created. - a.providers = 1; - Self::on_created_account(who.clone(), a); - IncRefStatus::Created - } else { - a.providers = a.providers.saturating_add(1); - IncRefStatus::Existed + Account::::mutate(who, |a| { + if a.providers == 0 && a.sufficients == 0 { + // Account is being created. + a.providers = 1; + Self::on_created_account(who.clone(), a); + IncRefStatus::Created + } else { + a.providers = a.providers.saturating_add(1); + IncRefStatus::Existed + } }) } @@ -1088,18 +1127,18 @@ impl Pallet { Pallet::::on_killed_account(who.clone()); Ok(DecRefStatus::Reaped) - } + }, (1, c, _) if c > 0 => { // Cannot remove last provider if there are consumers. Err(DispatchError::ConsumerRemaining) - } + }, (x, _, _) => { // Account will continue to exist as there is either > 1 provider or // > 0 sufficients. account.providers = x - 1; *maybe_account = Some(account); Ok(DecRefStatus::Exists) - } + }, } } else { log::error!( @@ -1113,14 +1152,16 @@ impl Pallet { /// Increment the self-sufficient reference counter on an account. pub fn inc_sufficients(who: &T::AccountId) -> IncRefStatus { - Account::::mutate(who, |a| if a.providers + a.sufficients == 0 { - // Account is being created. - a.sufficients = 1; - Self::on_created_account(who.clone(), a); - IncRefStatus::Created - } else { - a.sufficients = a.sufficients.saturating_add(1); - IncRefStatus::Existed + Account::::mutate(who, |a| { + if a.providers + a.sufficients == 0 { + // Account is being created. 
+ a.sufficients = 1; + Self::on_created_account(who.clone(), a); + IncRefStatus::Created + } else { + a.sufficients = a.sufficients.saturating_add(1); + IncRefStatus::Existed + } }) } @@ -1141,12 +1182,12 @@ impl Pallet { (0, 0) | (1, 0) => { Pallet::::on_killed_account(who.clone()); DecRefStatus::Reaped - } + }, (x, _) => { account.sufficients = x - 1; *maybe_account = Some(account); DecRefStatus::Exists - } + }, } } else { log::error!( @@ -1178,24 +1219,28 @@ impl Pallet { /// /// The account `who`'s `providers` must be non-zero or this will return an error. pub fn inc_consumers(who: &T::AccountId) -> Result<(), DispatchError> { - Account::::try_mutate(who, |a| if a.providers > 0 { - a.consumers = a.consumers.saturating_add(1); - Ok(()) - } else { - Err(DispatchError::NoProviders) + Account::::try_mutate(who, |a| { + if a.providers > 0 { + a.consumers = a.consumers.saturating_add(1); + Ok(()) + } else { + Err(DispatchError::NoProviders) + } }) } /// Decrement the reference counter on an account. This *MUST* only be done once for every time /// you called `inc_consumers` on `who`. pub fn dec_consumers(who: &T::AccountId) { - Account::::mutate(who, |a| if a.consumers > 0 { - a.consumers -= 1; - } else { - log::error!( - target: "runtime::system", - "Logic error: Unexpected underflow in reducing consumer", - ); + Account::::mutate(who, |a| { + if a.consumers > 0 { + a.consumers -= 1; + } else { + log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing consumer", + ); + } }) } @@ -1233,14 +1278,13 @@ impl Pallet { pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { let block_number = Self::block_number(); // Don't populate events on genesis. - if block_number.is_zero() { return } + if block_number.is_zero() { + return + } let phase = ExecutionPhase::::get().unwrap_or_default(); - let event = EventRecord { - phase, - event, - topics: topics.iter().cloned().collect::>(), - }; + let event = + EventRecord { phase, event, topics: topics.iter().cloned().collect::>() }; // Index of the to be added event. let event_idx = { @@ -1366,12 +1410,18 @@ impl Pallet { if let Some(storage_changes_root) = storage_changes_root { let item = generic::DigestItem::ChangesTrieRoot( T::Hash::decode(&mut &storage_changes_root[..]) - .expect("Node is configured to use the same hash; qed") + .expect("Node is configured to use the same hash; qed"), ); digest.push(item); } - ::new(number, extrinsics_root, storage_root, parent_hash, digest) + ::new( + number, + extrinsics_root, + storage_root, + parent_hash, + digest, + ) } /// Deposits a log and ensures it matches the block's log data. @@ -1448,7 +1498,9 @@ impl Pallet { } /// Return the chain's current runtime version. - pub fn runtime_version() -> RuntimeVersion { T::Version::get() } + pub fn runtime_version() -> RuntimeVersion { + T::Version::get() + } /// Retrieve the account transaction counter from storage. pub fn account_nonce(who: impl EncodeLike) -> T::Index { @@ -1471,20 +1523,18 @@ impl Pallet { /// To be called immediately after an extrinsic has been applied. 
pub fn note_applied_extrinsic(r: &DispatchResultWithPostInfo, mut info: DispatchInfo) { info.weight = extract_actual_weight(r, &info); - Self::deposit_event( - match r { - Ok(_) => Event::ExtrinsicSuccess(info), - Err(err) => { - log::trace!( - target: "runtime::system", - "Extrinsic failed at block({:?}): {:?}", - Self::block_number(), - err, - ); - Event::ExtrinsicFailed(err.error, info) - }, - } - ); + Self::deposit_event(match r { + Ok(_) => Event::ExtrinsicSuccess(info), + Err(err) => { + log::trace!( + target: "runtime::system", + "Extrinsic failed at block({:?}): {:?}", + Self::block_number(), + err, + ); + Event::ExtrinsicFailed(err.error, info) + }, + }); let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; @@ -1495,8 +1545,8 @@ impl Pallet { /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block /// has been called. pub fn note_finished_extrinsics() { - let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX) - .unwrap_or_default(); + let extrinsic_index: u32 = + storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); ExtrinsicCount::::put(extrinsic_index); ExecutionPhase::::put(Phase::Finalization); } @@ -1579,8 +1629,7 @@ impl HandleLifetime for Consumer { } } -impl BlockNumberProvider for Pallet -{ +impl BlockNumberProvider for Pallet { type BlockNumber = ::BlockNumber; fn current_block_number() -> Self::BlockNumber { @@ -1618,7 +1667,7 @@ impl StoredMap for Pallet { DecRefStatus::Reaped => return Ok(result), DecRefStatus::Exists => { // Update value as normal... - } + }, } } else if !was_providing && !is_providing { return Ok(result) @@ -1629,14 +1678,15 @@ impl StoredMap for Pallet { } /// Split an `option` into two constituent options, as defined by a `splitter` function. -pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S)) - -> (Option, Option) -{ +pub fn split_inner( + option: Option, + splitter: impl FnOnce(T) -> (R, S), +) -> (Option, Option) { match option { Some(inner) => { let (r, s) = splitter(inner); (Some(r), Some(s)) - } + }, None => (None, None), } } @@ -1659,7 +1709,7 @@ impl Lookup for ChainContext { /// Prelude to be used alongside pallet macro, for ease of use. pub mod pallet_prelude { - pub use crate::{ensure_signed, ensure_none, ensure_root}; + pub use crate::{ensure_none, ensure_root, ensure_signed}; /// Type alias for the `Origin` associated type of system config. pub type OriginFor = ::Origin; diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index 49a458224020c55042f90c469f3847a95d318f69..74ffc828314b883a7f113f9dda0b9fe46d781622 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -25,8 +25,8 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. -use frame_support::weights::{Weight, DispatchClass, constants, PerDispatchClass, OneOrMany}; -use sp_runtime::{RuntimeDebug, Perbill}; +use frame_support::weights::{constants, DispatchClass, OneOrMany, PerDispatchClass, Weight}; +use sp_runtime::{Perbill, RuntimeDebug}; /// Block length limit configuration. 
#[derive(RuntimeDebug, Clone, codec::Encode, codec::Decode)] @@ -40,29 +40,26 @@ pub struct BlockLength { impl Default for BlockLength { fn default() -> Self { - BlockLength::max_with_normal_ratio( - 5 * 1024 * 1024, - DEFAULT_NORMAL_RATIO, - ) + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, DEFAULT_NORMAL_RATIO) } } impl BlockLength { /// Create new `BlockLength` with `max` for every class. pub fn max(max: u32) -> Self { - Self { - max: PerDispatchClass::new(|_| max), - } + Self { max: PerDispatchClass::new(|_| max) } } /// Create new `BlockLength` with `max` for `Operational` & `Mandatory` /// and `normal * max` for `Normal`. pub fn max_with_normal_ratio(max: u32, normal: Perbill) -> Self { Self { - max: PerDispatchClass::new(|class| if class == DispatchClass::Normal { - normal * max - } else { - max + max: PerDispatchClass::new(|class| { + if class == DispatchClass::Normal { + normal * max + } else { + max + } }), } } @@ -206,10 +203,7 @@ pub struct BlockWeights { impl Default for BlockWeights { fn default() -> Self { - Self::with_sensible_defaults( - 1 * constants::WEIGHT_PER_SECOND, - DEFAULT_NORMAL_RATIO, - ) + Self::with_sensible_defaults(1 * constants::WEIGHT_PER_SECOND, DEFAULT_NORMAL_RATIO) } } @@ -245,7 +239,8 @@ impl BlockWeights { weights.max_extrinsic.unwrap_or(0) <= max_for_class.saturating_sub(base_for_class), &mut error, "[{:?}] {:?} (max_extrinsic) can't be greater than {:?} (max for class)", - class, weights.max_extrinsic, + class, + weights.max_extrinsic, max_for_class.saturating_sub(base_for_class), ); // Max extrinsic should not be 0 @@ -260,21 +255,27 @@ impl BlockWeights { reserved > base_for_class || reserved == 0, &mut error, "[{:?}] {:?} (reserved) has to be greater than {:?} (base extrinsic) if set", - class, reserved, base_for_class, + class, + reserved, + base_for_class, ); // Make sure max block is greater than max_total if it's set. error_assert!( self.max_block >= weights.max_total.unwrap_or(0), &mut error, "[{:?}] {:?} (max block) has to be greater than {:?} (max for class)", - class, self.max_block, weights.max_total, + class, + self.max_block, + weights.max_total, ); // Make sure we can fit at least one extrinsic. error_assert!( self.max_block > base_for_class + self.base_block, &mut error, "[{:?}] {:?} (max block) must fit at least one extrinsic {:?} (base weight)", - class, self.max_block, base_for_class + self.base_block, + class, + self.max_block, + base_for_class + self.base_block, ); } @@ -309,10 +310,7 @@ impl BlockWeights { /// Assumptions: /// - Average block initialization is assumed to be `10%`. /// - `Operational` transactions have reserved allowance (`1.0 - normal_ratio`) - pub fn with_sensible_defaults( - expected_block_weight: Weight, - normal_ratio: Perbill, - ) -> Self { + pub fn with_sensible_defaults(expected_block_weight: Weight, normal_ratio: Perbill) -> Self { let normal_weight = normal_ratio * expected_block_weight; Self::builder() .for_class(DispatchClass::Normal, |weights| { @@ -388,7 +386,7 @@ impl BlockWeightsBuilder { for class in class.into_iter() { action(self.weights.per_class.get_mut(class)); } - self + self } /// Construct the `BlockWeights` object. 
@@ -408,7 +406,8 @@ impl BlockWeightsBuilder {
 		for class in DispatchClass::all() {
 			let per_class = weights.per_class.get_mut(*class);
 			if per_class.max_extrinsic.is_none() && init_cost.is_some() {
-				per_class.max_extrinsic = per_class.max_total
+				per_class.max_extrinsic = per_class
+					.max_total
 					.map(|x| x.saturating_sub(init_weight))
 					.map(|x| x.saturating_sub(per_class.base_extrinsic));
 			}
@@ -435,8 +434,6 @@ mod tests {
 
 	#[test]
 	fn default_weights_are_valid() {
-		BlockWeights::default()
-			.validate()
-			.unwrap();
+		BlockWeights::default().validate().unwrap();
 	}
 }
diff --git a/substrate/frame/system/src/mock.rs b/substrate/frame/system/src/mock.rs
index e9b6fb7d968ec020a4aee3da2a099fcaecc507b0..480e8b1a26bae78a9bd6142b1ea9d3fda0d90597 100644
--- a/substrate/frame/system/src/mock.rs
+++ b/substrate/frame/system/src/mock.rs
@@ -16,13 +16,14 @@
 // limitations under the License.
 
 use crate::{self as frame_system, *};
-use sp_std::cell::RefCell;
+use frame_support::parameter_types;
 use sp_core::H256;
 use sp_runtime::{
+	testing::Header,
 	traits::{BlakeTwo256, IdentityLookup},
-	testing::Header, BuildStorage,
+	BuildStorage,
 };
-use frame_support::parameter_types;
+use sp_std::cell::RefCell;
 
 type UncheckedExtrinsic = mocking::MockUncheckedExtrinsic<Test>;
 type Block = mocking::MockBlock<Test>;
@@ -75,13 +76,15 @@ parameter_types! {
 		limits::BlockLength::max_with_normal_ratio(1024, NORMAL_DISPATCH_RATIO);
 }
 
-thread_local!{
+thread_local! {
 	pub static KILLED: RefCell<Vec<u64>> = RefCell::new(vec![]);
 }
 
 pub struct RecordKilled;
 impl OnKilledAccount<u64> for RecordKilled {
-	fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) }
+	fn on_killed_account(who: &u64) {
+		KILLED.with(|r| r.borrow_mut().push(*who))
+	}
 }
 
 impl Config for Test {
@@ -117,12 +120,14 @@ pub const CALL: &<Test as Config>::Call = &Call::System(frame_system::Call::set_
 
 /// Create new externalities for `System` module tests.
 pub fn new_test_ext() -> sp_io::TestExternalities {
-	let mut ext: sp_io::TestExternalities = GenesisConfig::default()
-		.build_storage().unwrap().into();
+	let mut ext: sp_io::TestExternalities =
+		GenesisConfig::default().build_storage().unwrap().into();
 	// Add to each test the initial weight of a block
-	ext.execute_with(|| System::register_extra_weight_unchecked(
-		<Test as Config>::BlockWeights::get().base_block,
-		DispatchClass::Mandatory
-	));
+	ext.execute_with(|| {
+		System::register_extra_weight_unchecked(
+			<Test as Config>::BlockWeights::get().base_block,
+			DispatchClass::Mandatory,
+		)
+	});
 	ext
 }
diff --git a/substrate/frame/system/src/mocking.rs b/substrate/frame/system/src/mocking.rs
index 9f80c59a9c4d22cc477f4feec6ac9a4820845b2e..7e6026b726186d9e574aa4fe470ee231e15c24c5 100644
--- a/substrate/frame/system/src/mocking.rs
+++ b/substrate/frame/system/src/mocking.rs
@@ -21,7 +21,10 @@ use sp_runtime::generic;
 
 /// An unchecked extrinsic type to be used in tests.
 pub type MockUncheckedExtrinsic<T, Signature = (), Extra = ()> = generic::UncheckedExtrinsic<
-	<T as crate::Config>::AccountId, <T as crate::Config>::Call, Signature, Extra,
+	<T as crate::Config>::AccountId,
+	<T as crate::Config>::Call,
+	Signature,
+	Extra,
 >;
 
 /// An implementation of `sp_runtime::traits::Block` to be used in tests.
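For orientation, the `BlockWeights`/`BlockLength` pair reformatted above is what a concrete runtime feeds into `frame_system`. A minimal sketch of that wiring, assuming the common 2-second weight target and 75% normal-dispatch ratio from the node template (the constant names and values are illustrative, not taken from this patch):

use frame_support::weights::{constants::WEIGHT_PER_SECOND, Weight};
use frame_system::limits::{BlockLength, BlockWeights};
use sp_runtime::Perbill;

// Illustrative budget: 2 s of compute per block, 75% of it available to
// `Normal` extrinsics, the remainder implicitly reserved for `Operational`.
const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND;
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);

fn runtime_limits() -> (BlockWeights, BlockLength) {
	// `with_sensible_defaults` runs the same `validate()` exercised by the
	// `default_weights_are_valid` test and rejects an inconsistent setup.
	let weights =
		BlockWeights::with_sensible_defaults(MAXIMUM_BLOCK_WEIGHT, NORMAL_DISPATCH_RATIO);
	// Cap block length at 5 MiB, with `Normal` extrinsics limited to 75% of it.
	let length = BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
	(weights, length)
}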
diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index 6769923bc04b8b1f27c92b6b841765c9331b7c76..e9f3d82ea3c289e4b49883650a86af69b530670b 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -57,12 +57,16 @@ #![warn(missing_docs)] use codec::Encode; -use sp_std::collections::btree_set::BTreeSet; -use sp_std::convert::{TryInto, TryFrom}; -use sp_std::prelude::{Box, Vec}; -use sp_runtime::app_crypto::RuntimeAppPublic; -use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}; use frame_support::RuntimeDebug; +use sp_runtime::{ + app_crypto::RuntimeAppPublic, + traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, +}; +use sp_std::{ + collections::btree_set::BTreeSet, + convert::{TryFrom, TryInto}, + prelude::{Box, Vec}, +}; /// Marker struct used to flag using all supported keys to sign a payload. pub struct ForAll {} @@ -76,7 +80,7 @@ pub struct ForAny {} /// utility function can be used. However, this struct is used by `Signer` /// to submit a signed transactions providing the signature along with the call. pub struct SubmitTransaction, OverarchingCall> { - _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)> + _phantom: sp_std::marker::PhantomData<(T, OverarchingCall)>, } impl SubmitTransaction @@ -120,10 +124,7 @@ pub struct Signer, X = Fo impl, X> Default for Signer { fn default() -> Self { - Self { - accounts: Default::default(), - _phantom: Default::default(), - } + Self { accounts: Default::default(), _phantom: Default::default() } } } @@ -161,72 +162,73 @@ impl, X> Signer let keystore_accounts = self.keystore_accounts(); match self.accounts { None => Box::new(keystore_accounts), - Some(ref keys) => { - let keystore_lookup: BTreeSet<::Public> = keystore_accounts - .map(|account| account.public).collect(); - - Box::new(keys.into_iter() - .enumerate() - .map(|(index, key)| { - let account_id = key.clone().into_account(); - Account::new(index, account_id, key.clone()) - }) - .filter(move |account| keystore_lookup.contains(&account.public))) - } + Some(ref keys) => { + let keystore_lookup: BTreeSet<::Public> = + keystore_accounts.map(|account| account.public).collect(); + + Box::new( + keys.into_iter() + .enumerate() + .map(|(index, key)| { + let account_id = key.clone().into_account(); + Account::new(index, account_id, key.clone()) + }) + .filter(move |account| keystore_lookup.contains(&account.public)), + ) + }, } } fn keystore_accounts(&self) -> impl Iterator> { - C::RuntimeAppPublic::all() - .into_iter() - .enumerate() - .map(|(index, key)| { - let generic_public = C::GenericPublic::from(key); - let public: T::Public = generic_public.into(); - let account_id = public.clone().into_account(); - Account::new(index, account_id, public) - }) + C::RuntimeAppPublic::all().into_iter().enumerate().map(|(index, key)| { + let generic_public = C::GenericPublic::from(key); + let public: T::Public = generic_public.into(); + let account_id = public.clone().into_account(); + Account::new(index, account_id, public) + }) } } - impl> Signer { - fn for_all(&self, f: F) -> Vec<(Account, R)> where + fn for_all(&self, f: F) -> Vec<(Account, R)> + where F: Fn(&Account) -> Option, { let accounts = self.accounts_from_keys(); accounts .into_iter() - .filter_map(|account| { - f(&account).map(|res| (account, res)) - }) + .filter_map(|account| f(&account).map(|res| (account, res))) .collect() } } impl> Signer { - fn for_any(&self, f: F) -> Option<(Account, R)> where + fn for_any(&self, f: F) -> 
Option<(Account, R)> + where F: Fn(&Account) -> Option, { let accounts = self.accounts_from_keys(); for account in accounts.into_iter() { let res = f(&account); if let Some(res) = res { - return Some((account, res)); + return Some((account, res)) } } None } } -impl> SignMessage for Signer { +impl> SignMessage + for Signer +{ type SignatureData = Vec<(Account, T::Signature)>; fn sign_message(&self, message: &[u8]) -> Self::SignatureData { self.for_all(|account| C::sign(message, account.public.clone())) } - fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, TPayload: SignedPayload, { @@ -234,14 +236,17 @@ impl> SignMessage for } } -impl> SignMessage for Signer { +impl> SignMessage + for Signer +{ type SignatureData = Option<(Account, T::Signature)>; fn sign_message(&self, message: &[u8]) -> Self::SignatureData { self.for_any(|account| C::sign(message, account.public.clone())) } - fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, TPayload: SignedPayload, { @@ -250,16 +255,14 @@ impl> SignMessage for } impl< - T: CreateSignedTransaction + SigningTypes, - C: AppCrypto, - LocalCall, -> SendSignedTransaction for Signer { + T: CreateSignedTransaction + SigningTypes, + C: AppCrypto, + LocalCall, + > SendSignedTransaction for Signer +{ type Result = Option<(Account, Result<(), ()>)>; - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result { + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result { self.for_any(|account| { let call = f(account); self.send_single_signed_transaction(account, call) @@ -268,16 +271,14 @@ impl< } impl< - T: SigningTypes + CreateSignedTransaction, - C: AppCrypto, - LocalCall, -> SendSignedTransaction for Signer { + T: SigningTypes + CreateSignedTransaction, + C: AppCrypto, + LocalCall, + > SendSignedTransaction for Signer +{ type Result = Vec<(Account, Result<(), ()>)>; - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result { + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result { self.for_all(|account| { let call = f(account); self.send_single_signed_transaction(account, call) @@ -286,10 +287,11 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, - C: AppCrypto, - LocalCall, -> SendUnsignedTransaction for Signer { + T: SigningTypes + SendTransactionTypes, + C: AppCrypto, + LocalCall, + > SendUnsignedTransaction for Signer +{ type Result = Option<(Account, Result<(), ()>)>; fn send_unsigned_transaction( @@ -303,7 +305,7 @@ impl< { self.for_any(|account| { let payload = f(account); - let signature= payload.sign::()?; + let signature = payload.sign::()?; let call = f2(payload, signature); self.submit_unsigned_transaction(call) }) @@ -311,10 +313,11 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, - C: AppCrypto, - LocalCall, -> SendUnsignedTransaction for Signer { + T: SigningTypes + SendTransactionTypes, + C: AppCrypto, + LocalCall, + > SendUnsignedTransaction for Signer +{ type Result = Vec<(Account, Result<(), ()>)>; fn send_unsigned_transaction( @@ -324,7 +327,8 @@ impl< ) -> Self::Result where F: Fn(&Account) -> TPayload, - TPayload: SignedPayload { + TPayload: SignedPayload, + { self.for_all(|account| { let payload = f(account); let signature = payload.sign::()?; @@ -352,16 +356,13 @@ impl Account { } } -impl Clone for Account where +impl Clone for 
Account +where T::AccountId: Clone, T::Public: Clone, { fn clone(&self) -> Self { - Self { - index: self.index, - id: self.id.clone(), - public: self.public.clone(), - } + Self { index: self.index, id: self.id.clone(), public: self.public.clone() } } } @@ -375,9 +376,9 @@ impl Clone for Account where /// The point of this trait is to be able to easily convert between `RuntimeAppPublic`, the wrapped /// (generic = non application-specific) crypto types and the `Public` type required by the runtime. /// -/// Example (pseudo-)implementation: +/// Example (pseudo-)implementation: /// ```ignore -/// // im-online specific crypto +/// // im-online specific crypto /// type RuntimeAppPublic = ImOnline(sr25519::Public); /// /// // wrapped "raw" crypto @@ -395,15 +396,13 @@ pub trait AppCrypto { type RuntimeAppPublic: RuntimeAppPublic; /// A raw crypto public key wrapped by `RuntimeAppPublic`. - type GenericPublic: - From + type GenericPublic: From + Into + TryFrom + Into; /// A matching raw crypto `Signature` type. - type GenericSignature: - From<::Signature> + type GenericSignature: From<::Signature> + Into<::Signature> + TryFrom + Into; @@ -424,16 +423,15 @@ pub trait AppCrypto { fn verify(payload: &[u8], public: Public, signature: Signature) -> bool { let p: Self::GenericPublic = match public.try_into() { Ok(a) => a, - _ => return false + _ => return false, }; let x = Into::::into(p); let signature: Self::GenericSignature = match signature.try_into() { Ok(a) => a, - _ => return false + _ => return false, }; - let signature = Into::<< - Self::RuntimeAppPublic as RuntimeAppPublic - >::Signature>::into(signature); + let signature = + Into::<::Signature>::into(signature); x.verify(&payload, &signature) } @@ -443,7 +441,6 @@ pub trait AppCrypto { /// /// This trait adds extra bounds to `Public` and `Signature` types of the runtime /// that are necessary to use these types for signing. -/// // TODO [#5663] Could this be just `T::Signature as traits::Verify>::Signer`? // Seems that this may cause issues with bounds resolution. pub trait SigningTypes: crate::Config { @@ -459,16 +456,13 @@ pub trait SigningTypes: crate::Config { + Ord; /// A matching `Signature` type. - type Signature: Clone - + PartialEq - + core::fmt::Debug - + codec::Codec; + type Signature: Clone + PartialEq + core::fmt::Debug + codec::Codec; } /// A definition of types required to submit transactions from within the runtime. pub trait SendTransactionTypes { /// The extrinsic type expected by the runtime. - type Extrinsic: ExtrinsicT + codec::Encode; + type Extrinsic: ExtrinsicT + codec::Encode; /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. @@ -482,7 +476,9 @@ pub trait SendTransactionTypes { /// This will most likely include creation of `SignedExtra` (a set of `SignedExtensions`). /// Note that the result can be altered by inspecting the `Call` (for instance adjusting /// fees, or mortality depending on the `pallet` being called). -pub trait CreateSignedTransaction: SendTransactionTypes + SigningTypes { +pub trait CreateSignedTransaction: + SendTransactionTypes + SigningTypes +{ /// Attempt to create signed extrinsic data that encodes call from given account. /// /// Runtime implementation is free to construct the payload to sign and the signature @@ -514,18 +510,19 @@ pub trait SignMessage { /// /// This method expects `f` to return a `SignedPayload` /// object which is then used for signing. 
- fn sign(&self, f: F) -> Self::SignatureData where + fn sign(&self, f: F) -> Self::SignatureData + where F: Fn(&Account) -> TPayload, - TPayload: SignedPayload, - ; + TPayload: SignedPayload; } /// Submit a signed transaction to the transaction pool. pub trait SendSignedTransaction< T: SigningTypes + CreateSignedTransaction, C: AppCrypto, - LocalCall -> { + LocalCall, +> +{ /// A submission result. /// /// This should contain an indication of success and the account that was used for signing. @@ -537,10 +534,7 @@ pub trait SendSignedTransaction< /// to be returned. /// The call is then wrapped into a transaction (see `#CreateSignedTransaction`), signed and /// submitted to the pool. - fn send_signed_transaction( - &self, - f: impl Fn(&Account) -> LocalCall, - ) -> Self::Result; + fn send_signed_transaction(&self, f: impl Fn(&Account) -> LocalCall) -> Self::Result; /// Wraps the call into transaction, signs using given account and submits to the pool. fn send_single_signed_transaction( @@ -559,10 +553,9 @@ pub trait SendSignedTransaction< call.into(), account.public.clone(), account.id.clone(), - account_data.nonce + account_data.nonce, )?; - let res = SubmitTransaction:: - ::submit_transaction(call, Some(signature)); + let res = SubmitTransaction::::submit_transaction(call, Some(signature)); if res.is_ok() { // increment the nonce. This is fine, since the code should always @@ -576,10 +569,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction< - T: SigningTypes + SendTransactionTypes, - LocalCall, -> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -601,12 +591,8 @@ pub trait SendUnsignedTransaction< TPayload: SignedPayload; /// Submits an unsigned call to the transaction pool. 
- fn submit_unsigned_transaction( - &self, - call: LocalCall - ) -> Option> { - Some(SubmitTransaction:: - ::submit_unsigned_transaction(call.into())) + fn submit_unsigned_transaction(&self, call: LocalCall) -> Option> { + Some(SubmitTransaction::::submit_unsigned_transaction(call.into())) } } @@ -631,14 +617,13 @@ pub trait SignedPayload: Encode { } } - #[cfg(test)] mod tests { use super::*; + use crate::mock::{Call, Test as TestRuntime, CALL}; use codec::Decode; - use crate::mock::{Test as TestRuntime, Call, CALL}; use sp_core::offchain::{testing, TransactionPoolExt}; - use sp_runtime::testing::{UintAuthorityId, TestSignature, TestXt}; + use sp_runtime::testing::{TestSignature, TestXt, UintAuthorityId}; impl SigningTypes for TestRuntime { type Public = UintAuthorityId; @@ -675,16 +660,8 @@ mod tests { type GenericSignature = TestSignature; } - fn assert_account( - next: Option<(Account, Result<(), ()>)>, - index: usize, - id: u64, - ) { - assert_eq!(next, Some((Account { - index, - id, - public: id.into(), - }, Ok(())))); + fn assert_account(next: Option<(Account, Result<(), ()>)>, index: usize, id: u64) { + assert_eq!(next, Some((Account { index, id, public: id.into() }, Ok(())))); } #[test] @@ -699,16 +676,10 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::all_accounts() + let result = Signer::::all_accounts() .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - CALL.clone() - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -740,16 +711,10 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::any_account() + let result = Signer::::any_account() .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - CALL.clone() - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -777,17 +742,11 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::all_accounts() + let result = Signer::::all_accounts() .with_filter(vec![0xf2.into(), 0xf1.into()]) .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - CALL.clone() - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -817,17 +776,11 @@ mod tests { t.execute_with(|| { // when - let result = Signer:: - ::any_account() + let result = Signer::::any_account() .with_filter(vec![0xf2.into(), 0xf1.into()]) .send_unsigned_transaction( - |account| SimplePayload { - data: vec![1, 2, 3], - public: account.public.clone() - }, - |_payload, _signature| { - CALL.clone() - } + |account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() }, + |_payload, _signature| CALL.clone(), ); // then @@ -842,5 +795,4 @@ mod tests { assert_eq!(tx1.signature, None); }); } - } diff --git a/substrate/frame/system/src/tests.rs b/substrate/frame/system/src/tests.rs index 77d4baee88ac1a3fda6705ced099857e3280e58a..f171fe661f69300647ab2c4007fa1eb41156580f 100644 --- a/substrate/frame/system/src/tests.rs +++ b/substrate/frame/system/src/tests.rs @@ -16,11 +16,14 @@ // limitations under the License. 
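Before the system `tests.rs` hunks below, one usage note on the offchain `Signer` API reshuffled above, as its own tests drive it. A hedged sketch: `TestAppCrypto` stands in for whatever `AppCrypto` implementation the test module defines, while `SimplePayload` and `CALL` are the module's visible fixtures.

// Every keystore account signs the payload; each signature is wrapped into
// its own unsigned transaction, yielding one (account, result) entry apiece.
let results = Signer::<TestRuntime, TestAppCrypto>::all_accounts()
	.send_unsigned_transaction(
		|account| SimplePayload { data: vec![1, 2, 3], public: account.public.clone() },
		|_payload, _signature| CALL.clone(),
	);
for (account, result) in &results {
	assert!(result.is_ok(), "unsigned submission failed for account {:?}", account.id);
}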
use crate::*; -use mock::{*, Origin}; -use sp_core::H256; -use sp_runtime::{DispatchError, DispatchErrorWithPostInfo, traits::{Header, BlakeTwo256}}; use frame_support::{ - assert_noop, assert_ok, weights::WithPostDispatchInfo, dispatch::PostDispatchInfo + assert_noop, assert_ok, dispatch::PostDispatchInfo, weights::WithPostDispatchInfo, +}; +use mock::{Origin, *}; +use sp_core::H256; +use sp_runtime::{ + traits::{BlakeTwo256, Header}, + DispatchError, DispatchErrorWithPostInfo, }; #[test] @@ -36,13 +39,10 @@ fn stored_map_works() { assert_ok!(System::insert(&0, 42)); assert!(!System::is_provider_required(&0)); - assert_eq!(Account::::get(0), AccountInfo { - nonce: 0, - providers: 1, - consumers: 0, - sufficients: 0, - data: 42, - }); + assert_eq!( + Account::::get(0), + AccountInfo { nonce: 0, providers: 1, consumers: 0, sufficients: 0, data: 42 } + ); assert_ok!(System::inc_consumers(&0)); assert!(System::is_provider_required(&0)); @@ -154,40 +154,25 @@ fn provider_required_to_support_consumer() { #[test] fn deposit_event_should_work() { new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_extrinsics(); System::deposit_event(SysEvent::CodeUpdated); System::finalize(); assert_eq!( System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: SysEvent::CodeUpdated.into(), - topics: vec![], - } - ] + vec![EventRecord { + phase: Phase::Finalization, + event: SysEvent::CodeUpdated.into(), + topics: vec![], + }] ); - System::initialize( - &2, - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&2, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::deposit_event(SysEvent::NewAccount(32)); System::note_finished_initialize(); System::deposit_event(SysEvent::KilledAccount(42)); System::note_applied_extrinsic(&Ok(().into()), Default::default()); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.into()), - Default::default() - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default()); System::note_finished_extrinsics(); System::deposit_event(SysEvent::NewAccount(3)); System::finalize(); @@ -214,7 +199,8 @@ fn deposit_event_should_work() { event: SysEvent::ExtrinsicFailed( DispatchError::BadOrigin.into(), Default::default() - ).into(), + ) + .into(), topics: vec![] }, EventRecord { @@ -230,78 +216,56 @@ fn deposit_event_should_work() { #[test] fn deposit_event_uses_actual_weight() { new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_initialize(); - let pre_info = DispatchInfo { - weight: 1000, - .. 
Default::default() - }; - System::note_applied_extrinsic( - &Ok(Some(300).into()), - pre_info, - ); - System::note_applied_extrinsic( - &Ok(Some(1000).into()), - pre_info, - ); + let pre_info = DispatchInfo { weight: 1000, ..Default::default() }; + System::note_applied_extrinsic(&Ok(Some(300).into()), pre_info); + System::note_applied_extrinsic(&Ok(Some(1000).into()), pre_info); System::note_applied_extrinsic( // values over the pre info should be capped at pre dispatch value &Ok(Some(1200).into()), pre_info, ); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.with_weight(999)), - pre_info, - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.with_weight(999)), pre_info); assert_eq!( System::events(), vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 300, - .. Default::default() - }, - ).into(), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 300, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ).into(), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 1000, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: SysEvent::ExtrinsicSuccess( - DispatchInfo { - weight: 1000, - .. Default::default() - }, - ).into(), + event: SysEvent::ExtrinsicSuccess(DispatchInfo { + weight: 1000, + ..Default::default() + },) + .into(), topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(3), event: SysEvent::ExtrinsicFailed( DispatchError::BadOrigin.into(), - DispatchInfo { - weight: 999, - .. Default::default() - }, - ).into(), + DispatchInfo { weight: 999, ..Default::default() }, + ) + .into(), topics: vec![] }, ] @@ -314,19 +278,10 @@ fn deposit_event_topics() { new_test_ext().execute_with(|| { const BLOCK_NUMBER: u64 = 1; - System::initialize( - &BLOCK_NUMBER, - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&BLOCK_NUMBER, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_extrinsics(); - let topics = vec![ - H256::repeat_byte(1), - H256::repeat_byte(2), - H256::repeat_byte(3), - ]; + let topics = vec![H256::repeat_byte(1), H256::repeat_byte(2), H256::repeat_byte(3)]; // We deposit a few events with different sets of topics. System::deposit_event_indexed(&topics[0..3], SysEvent::NewAccount(1).into()); @@ -359,18 +314,9 @@ fn deposit_event_topics() { // Check that the topic-events mapping reflects the deposited topics. // Note that these are indexes of the events. 
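(As a gloss on the assertions that follow: `event_topics` maps each topic hash to `(block_number, event_index)` pairs, so after the three indexed deposits `topics[0]` appears in events 0 and 1, `topics[1]` in events 0 and 2, and `topics[2]` only in event 0, which is exactly what the assertions check.)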
- assert_eq!( - System::event_topics(&topics[0]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], - ); - assert_eq!( - System::event_topics(&topics[1]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], - ); - assert_eq!( - System::event_topics(&topics[2]), - vec![(BLOCK_NUMBER, 0)], - ); + assert_eq!(System::event_topics(&topics[0]), vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)],); + assert_eq!(System::event_topics(&topics[1]), vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)],); + assert_eq!(System::event_topics(&topics[2]), vec![(BLOCK_NUMBER, 0)],); }); } @@ -390,30 +336,19 @@ fn prunes_block_hash_mappings() { new_test_ext().execute_with(|| { // simulate import of 15 blocks for n in 1..=15 { - System::initialize( - &n, - &[n as u8 - 1; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&n, &[n as u8 - 1; 32].into(), &Default::default(), InitKind::Full); System::finalize(); } // first 5 block hashes are pruned for n in 0..5 { - assert_eq!( - System::block_hash(n), - H256::zero(), - ); + assert_eq!(System::block_hash(n), H256::zero(),); } // the remaining 10 are kept for n in 5..15 { - assert_eq!( - System::block_hash(n), - [n as u8; 32].into(), - ); + assert_eq!(System::block_hash(n), [n as u8; 32].into(),); } }) } @@ -453,10 +388,7 @@ fn set_code_checks_works() { let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::ReadRuntimeVersionExt::new(read_runtime_version)); ext.execute_with(|| { - let res = System::set_code( - RawOrigin::Root.into(), - vec![1, 2, 3, 4], - ); + let res = System::set_code(RawOrigin::Root.into(), vec![1, 2, 3, 4]); assert_eq!(expected.map_err(DispatchErrorWithPostInfo::from), res); }); @@ -473,7 +405,8 @@ fn set_code_with_real_wasm_blob() { System::set_code( RawOrigin::Root.into(), substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), - ).unwrap(); + ) + .unwrap(); assert_eq!( System::events(), @@ -496,9 +429,10 @@ fn runtime_upgraded_with_set_storage() { RawOrigin::Root.into(), vec![( well_known_keys::CODE.to_vec(), - substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec() + substrate_test_runtime_client::runtime::wasm_binary_unwrap().to_vec(), )], - ).unwrap(); + ) + .unwrap(); }); } @@ -531,20 +465,12 @@ fn ensure_one_of_works() { #[test] fn extrinsics_root_is_calculated_correctly() { new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); + System::initialize(&1, &[0u8; 32].into(), &Default::default(), InitKind::Full); System::note_finished_initialize(); System::note_extrinsic(vec![1]); System::note_applied_extrinsic(&Ok(().into()), Default::default()); System::note_extrinsic(vec![2]); - System::note_applied_extrinsic( - &Err(DispatchError::BadOrigin.into()), - Default::default() - ); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin.into()), Default::default()); System::note_finished_extrinsics(); let header = System::finalize(); diff --git a/substrate/frame/system/src/weights.rs b/substrate/frame/system/src/weights.rs index c6284ba17d63fd27453b6045fcd88d7bab963172..89fc63fab84479b52625d200aaccdfe18c51a24e 100644 --- a/substrate/frame/system/src/weights.rs +++ b/substrate/frame/system/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/timestamp/src/benchmarking.rs b/substrate/frame/timestamp/src/benchmarking.rs index 
5d0178dc148465cc6b61d5c9062b465537428cef..84391380da832175932df4a7764e77f1ae235e32 100644 --- a/substrate/frame/timestamp/src/benchmarking.rs +++ b/substrate/frame/timestamp/src/benchmarking.rs @@ -20,9 +20,9 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, TrackedStorageKey}; use frame_support::{ensure, traits::OnFinalize}; -use frame_benchmarking::{benchmarks, TrackedStorageKey, impl_benchmark_test_suite}; +use frame_system::RawOrigin; use crate::Pallet as Timestamp; @@ -57,8 +57,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - Timestamp, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/timestamp/src/lib.rs b/substrate/frame/timestamp/src/lib.rs index e9b6388340b2f1de4bbd0cd57744e40ce080911b..247520297d2488157d1ea1136e9606b008d2a120 100644 --- a/substrate/frame/timestamp/src/lib.rs +++ b/substrate/frame/timestamp/src/lib.rs @@ -95,28 +95,30 @@ mod benchmarking; pub mod weights; -use sp_std::{result, cmp}; -use frame_support::traits::{Time, UnixTime, OnTimestampSet}; -use sp_runtime::traits::{AtLeast32Bit, Zero, SaturatedConversion, Scale}; -use sp_timestamp::{ - InherentError, INHERENT_IDENTIFIER, InherentType, -}; +use frame_support::traits::{OnTimestampSet, Time, UnixTime}; +use sp_runtime::traits::{AtLeast32Bit, SaturatedConversion, Scale, Zero}; +use sp_std::{cmp, result}; +use sp_timestamp::{InherentError, InherentType, INHERENT_IDENTIFIER}; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; /// The pallet configuration trait #[pallet::config] pub trait Config: frame_system::Config { /// Type used for expressing timestamp. - type Moment: Parameter + Default + AtLeast32Bit - + Scale + Copy + MaxEncodedLen; + type Moment: Parameter + + Default + + AtLeast32Bit + + Scale + + Copy + + MaxEncodedLen; /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. 
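The `check_inherent` hunk in the next stretch of this diff accepts a proposed timestamp only if it neither runs ahead of local time by more than the 30-second drift allowance nor falls behind the previous timestamp plus the minimum period. The acceptance rule in miniature (u64 milliseconds; a sketch of the rule, not the pallet's exact error handling):

```rust
const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000;

// True when a block's timestamp inherent would pass verification: not too
// far ahead of our local clock, and far enough past the previous block.
fn timestamp_acceptable(proposed: u64, local_now: u64, previous: u64, minimum_period: u64) -> bool {
    proposed <= local_now.saturating_add(MAX_TIMESTAMP_DRIFT_MILLIS)
        && proposed >= previous.saturating_add(minimum_period)
}
```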
type OnTimestampSet: OnTimestampSet; @@ -208,7 +210,8 @@ pub mod pallet { const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(data: &InherentData) -> Option { - let inherent_data = data.get_data::(&INHERENT_IDENTIFIER) + let inherent_data = data + .get_data::(&INHERENT_IDENTIFIER) .expect("Timestamp inherent data not correctly encoded") .expect("Timestamp inherent data must be provided"); let data = (*inherent_data).saturated_into::(); @@ -217,7 +220,10 @@ pub mod pallet { Some(Call::set(next_time.into())) } - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + fn check_inherent( + call: &Self::Call, + data: &InherentData, + ) -> result::Result<(), Self::Error> { const MAX_TIMESTAMP_DRIFT_MILLIS: sp_timestamp::Timestamp = sp_timestamp::Timestamp::new(30 * 1000); @@ -226,7 +232,8 @@ pub mod pallet { _ => return Ok(()), }; - let data = data.get_data::(&INHERENT_IDENTIFIER) + let data = data + .get_data::(&INHERENT_IDENTIFIER) .expect("Timestamp inherent data not correctly encoded") .expect("Timestamp inherent data must be provided"); @@ -293,13 +300,16 @@ impl UnixTime for Pallet { #[cfg(test)] mod tests { - use crate as pallet_timestamp; use super::*; + use crate as pallet_timestamp; use frame_support::{assert_ok, parameter_types}; - use sp_io::TestExternalities; use sp_core::H256; - use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + }; pub fn new_test_ext() -> TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); @@ -380,7 +390,9 @@ mod tests { } #[test] - #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] + #[should_panic( + expected = "Timestamp must increment by at least between sequential blocks" + )] fn block_period_minimum_enforced() { new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); diff --git a/substrate/frame/timestamp/src/weights.rs b/substrate/frame/timestamp/src/weights.rs index cf4fa6ea3d639c215efc39bae94ce213e8eb9ad5..a3fe6f198346afe6b14bb5e7bb4c4a380ac1bbc6 100644 --- a/substrate/frame/timestamp/src/weights.rs +++ b/substrate/frame/timestamp/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/tips/src/benchmarking.rs b/substrate/frame/tips/src/benchmarking.rs index 6c304fabb5a256f30eeded6d364cc843c80084b8..794a6815b3a3c4a72ef60fb778af41662fcd26e5 100644 --- a/substrate/frame/tips/src/benchmarking.rs +++ b/substrate/frame/tips/src/benchmarking.rs @@ -21,8 +21,8 @@ use super::*; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Saturating; use crate::Module as TipsMod; @@ -32,9 +32,9 @@ const SEED: u32 = 0; // Create the pre-requisite information needed to create a `report_awesome`. 
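`setup_awesome`, directly below, pre-funds the reporter with exactly the deposit that `report_awesome` will reserve: a flat base, a per-byte charge on the reason, plus the existential deposit so the account survives the reservation. As plain arithmetic (illustrative u64 balances; the real values come from the pallet's `Config`):

```rust
// Deposit a tip reporter must be able to reserve for a reason of
// `reason_len` bytes; `existential` keeps the account alive afterwards.
fn required_deposit(base: u64, per_byte: u64, reason_len: u64, existential: u64) -> u64 {
    base.saturating_add(per_byte.saturating_mul(reason_len))
        .saturating_add(existential)
}
```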
fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { let caller = whitelisted_caller(); - let value = T::TipReportDepositBase::get() - + T::DataDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); + let value = T::TipReportDepositBase::get() + + T::DataDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); let _ = T::Currency::make_free_balance_be(&caller, value); let reason = vec![0; length as usize]; let awesome_person = account("awesome", 0, SEED); @@ -42,12 +42,13 @@ fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId } // Create the pre-requisite information needed to call `tip_new`. -fn setup_tip(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> -{ +fn setup_tip( + r: u32, + t: u32, +) -> Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> { let tippers_count = T::Tippers::count(); - for i in 0 .. t { + for i in 0..t { let member = account("member", i, SEED); T::Tippers::add(&member); ensure!(T::Tippers::contains(&member), "failed to add tipper"); @@ -63,10 +64,8 @@ fn setup_tip(r: u32, t: u32) -> // Create `t` new tips for the tip proposal with `hash`. // This function automatically makes the tip able to close. -fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> - Result<(), &'static str> -{ - for i in 0 .. t { +fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<(), &'static str> { + for i in 0..t { let caller = account("member", i, SEED); ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); TipsMod::::tip(RawOrigin::Signed(caller).into(), hash, value)?; @@ -193,8 +192,4 @@ benchmarks! { }: _(RawOrigin::Root, hash) } -impl_benchmark_test_suite!( - TipsMod, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/tips/src/lib.rs b/substrate/frame/tips/src/lib.rs index e57f0d7b8df05dd7160b630b73ea20231b685cd5..e8b5544bd6642b664c027dcce0cbba710876e491 100644 --- a/substrate/frame/tips/src/lib.rs +++ b/substrate/frame/tips/src/lib.rs @@ -54,23 +54,24 @@ #![cfg_attr(not(feature = "std"), no_std)] -mod tests; mod benchmarking; +mod tests; pub mod weights; -use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, decl_error, Parameter}; -use frame_support::traits::{ - Currency, Get, ExistenceRequirement::{KeepAlive}, - ReservableCurrency +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{Currency, ExistenceRequirement::KeepAlive, Get, ReservableCurrency}, + Parameter, }; +use sp_std::prelude::*; -use sp_runtime::{ Percent, RuntimeDebug, traits::{ - Zero, AccountIdConversion, Hash, BadOrigin -}}; -use frame_support::traits::{SortedMembers, ContainsLengthBound, OnUnbalanced, EnsureOrigin}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use frame_support::traits::{ContainsLengthBound, EnsureOrigin, OnUnbalanced, SortedMembers}; use frame_system::{self as system, ensure_signed}; +use sp_runtime::{ + traits::{AccountIdConversion, BadOrigin, Hash, Zero}, + Percent, RuntimeDebug, +}; pub use weights::WeightInfo; pub type BalanceOf = pallet_treasury::BalanceOf; @@ -484,9 +485,9 @@ impl Module { if m < a { continue } else { - break true; + break true } - } + }, } }); } @@ -495,7 +496,10 @@ impl Module { /// /// Up to three balance operations. /// Plus `O(T)` (`T` is Tippers length). 
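The `retain_active_tips` hunk above is one of the few places where the rustfmt changes touch control-flow layout rather than just line width: `loop` is an expression, so `break` can carry the loop's result. The same shape in a standalone function (illustrative, not the pallet's code):

```rust
// Walks a sorted slice until it finds a value at or above `threshold`;
// `break true` / `break false` yield the loop's value directly.
fn any_at_or_above(sorted: &[u32], threshold: u32) -> bool {
    let mut iter = sorted.iter();
    loop {
        match iter.next() {
            None => break false,
            Some(&m) =>
                if m < threshold {
                    continue
                } else {
                    break true
                },
        }
    }
}
```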
- fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { + fn payout_tip( + hash: T::Hash, + tip: OpenTip, T::BlockNumber, T::Hash>, + ) { let mut tips = tip.tips; Self::retain_active_tips(&mut tips); tips.sort_by_key(|i| i.1); @@ -549,22 +553,18 @@ impl Module { tips: Vec<(AccountId, Balance)>, } - use frame_support::{Twox64Concat, migration::storage_key_iter}; + use frame_support::{migration::storage_key_iter, Twox64Concat}; for (hash, old_tip) in storage_key_iter::< T::Hash, OldOpenTip, T::BlockNumber, T::Hash>, Twox64Concat, - >(b"Treasury", b"Tips").drain() + >(b"Treasury", b"Tips") + .drain() { - let (finder, deposit, finders_fee) = match old_tip.finder { - Some((finder, deposit)) => { - (finder, deposit, true) - }, - None => { - (T::AccountId::default(), Zero::zero(), false) - }, + Some((finder, deposit)) => (finder, deposit, true), + None => (T::AccountId::default(), Zero::zero(), false), }; let new_tip = OpenTip { reason: old_tip.reason, @@ -573,7 +573,7 @@ impl Module { deposit, closes: old_tip.closes, tips: old_tip.tips, - finders_fee + finders_fee, }; Tips::::insert(hash, new_tip) } diff --git a/substrate/frame/tips/src/tests.rs b/substrate/frame/tips/src/tests.rs index 7cf4c31a6495c3335606132bbcc3e1b71e47fbe6..eb52acf8026b8e388c5cc5c4e8a79c215ad29e45 100644 --- a/substrate/frame/tips/src/tests.rs +++ b/substrate/frame/tips/src/tests.rs @@ -19,21 +19,19 @@ #![cfg(test)] -use crate as tips; use super::*; -use std::cell::RefCell; +use crate as tips; use frame_support::{ - assert_noop, assert_ok, parameter_types, - weights::Weight, traits::SortedMembers, - PalletId, pallet_prelude::GenesisBuild, + assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, traits::SortedMembers, + weights::Weight, PalletId, }; -use sp_runtime::Permill; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, Permill, }; +use std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -102,9 +100,7 @@ thread_local! { pub struct TenToFourteen; impl SortedMembers for TenToFourteen { fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) + TEN_TO_FOURTEEN.with(|v| v.borrow().clone()) } #[cfg(feature = "runtime-benchmarks")] fn add(new: &u128) { @@ -119,7 +115,9 @@ impl ContainsLengthBound for TenToFourteen { fn max_len() -> usize { TEN_TO_FOURTEEN.with(|v| v.borrow().len()) } - fn min_len() -> usize { 0 } + fn min_len() -> usize { + 0 + } } parameter_types! { pub const ProposalBond: Permill = Permill::from_percent(5); @@ -142,7 +140,7 @@ impl pallet_treasury::Config for Test { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BurnDestination = (); // Just gets burned. + type BurnDestination = (); // Just gets burned. type WeightInfo = (); type SpendFunds = (); type MaxApprovals = MaxApprovals; @@ -165,19 +163,21 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. 
balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); t.into() } fn last_event() -> RawEvent { - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::TipsModTestInst(inner) = e { Some(inner) } else { None } - }) + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::TipsModTestInst(inner) = e { Some(inner) } else { None }) .last() .unwrap() } @@ -267,13 +267,19 @@ fn close_tip_works() { assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); - assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + assert_noop!( + TipsModTestInst::close_tip(Origin::signed(0), h.into()), + Error::::StillOpen + ); assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); assert_eq!(last_event(), RawEvent::TipClosing(h)); - assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::Premature); + assert_noop!( + TipsModTestInst::close_tip(Origin::signed(0), h.into()), + Error::::Premature + ); System::set_block_number(2); assert_noop!(TipsModTestInst::close_tip(Origin::none(), h.into()), BadOrigin); @@ -282,7 +288,10 @@ fn close_tip_works() { assert_eq!(last_event(), RawEvent::TipClosed(h, 3, 10)); - assert_noop!(TipsModTestInst::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + assert_noop!( + TipsModTestInst::close_tip(Origin::signed(100), h.into()), + Error::::UnknownTip + ); }); } @@ -305,10 +314,7 @@ fn slash_tip_works() { assert_eq!(last_event(), RawEvent::NewTip(h)); // can't remove from any origin - assert_noop!( - TipsModTestInst::slash_tip(Origin::signed(0), h.clone()), - BadOrigin, - ); + assert_noop!(TipsModTestInst::slash_tip(Origin::signed(0), h.clone()), BadOrigin,); // can remove from root. 
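`assert_noop!`, used throughout these tests, checks more than the returned error: it also verifies that the failing call left storage untouched. A sketch of that contract (the real macro lives in `frame_support` and compares storage roots via `sp_io`; `storage_root` here is a stand-in):

```rust
fn storage_root() -> Vec<u8> {
    // Stand-in for sp_io::storage::root(): a commitment to current state.
    Vec::new()
}

// In spirit: the expected Err comes back AND no state was mutated.
macro_rules! assert_noop_sketch {
    ($call:expr, $err:expr $(,)?) => {{
        let before = storage_root();
        assert_eq!($call, Err($err));
        assert_eq!(storage_root(), before, "state mutated by a failing call");
    }};
}

fn main() {
    let result: Result<(), &str> = Err("StillOpen");
    assert_noop_sketch!(result, "StillOpen");
}
```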
assert_ok!(TipsModTestInst::slash_tip(Origin::root(), h.clone())); @@ -330,10 +336,16 @@ fn retract_tip_works() { assert_ok!(TipsModTestInst::tip(Origin::signed(10), h.clone(), 10)); assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_noop!( + TipsModTestInst::retract_tip(Origin::signed(10), h.clone()), + Error::::NotFinder + ); assert_ok!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone())); System::set_block_number(2); - assert_noop!(TipsModTestInst::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + assert_noop!( + TipsModTestInst::close_tip(Origin::signed(0), h.into()), + Error::::UnknownTip + ); // with tip new Balances::make_free_balance_be(&Treasury::account_id(), 101); @@ -341,10 +353,16 @@ fn retract_tip_works() { let h = tip_hash(); assert_ok!(TipsModTestInst::tip(Origin::signed(11), h.clone(), 10)); assert_ok!(TipsModTestInst::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(TipsModTestInst::retract_tip(Origin::signed(0), h.clone()), Error::::NotFinder); + assert_noop!( + TipsModTestInst::retract_tip(Origin::signed(0), h.clone()), + Error::::NotFinder + ); assert_ok!(TipsModTestInst::retract_tip(Origin::signed(10), h.clone())); System::set_block_number(2); - assert_noop!(TipsModTestInst::close_tip(Origin::signed(10), h.into()), Error::::UnknownTip); + assert_noop!( + TipsModTestInst::close_tip(Origin::signed(10), h.into()), + Error::::UnknownTip + ); }); } @@ -416,7 +434,7 @@ fn test_last_reward_migration() { who: 10, finder: Some((20, 30)), closes: Some(13), - tips: vec![(40, 50), (60, 70)] + tips: vec![(40, 50), (60, 70)], }; let reason2 = BlakeTwo256::hash(b"reason2"); @@ -427,24 +445,17 @@ fn test_last_reward_migration() { who: 20, finder: None, closes: Some(13), - tips: vec![(40, 50), (60, 70)] + tips: vec![(40, 50), (60, 70)], }; let data = vec![ - ( - Tips::::hashed_key_for(hash1), - old_tip_finder.encode().to_vec() - ), - ( - Tips::::hashed_key_for(hash2), - old_tip_no_finder.encode().to_vec() - ), + (Tips::::hashed_key_for(hash1), old_tip_finder.encode().to_vec()), + (Tips::::hashed_key_for(hash2), old_tip_no_finder.encode().to_vec()), ]; s.top = data.into_iter().collect(); sp_io::TestExternalities::new(s).execute_with(|| { - TipsModTestInst::migrate_retract_tip_for_tip_new(); // Test w/ finder @@ -481,10 +492,12 @@ fn test_last_reward_migration() { fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let initial_funding = 100; - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. 
balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&pallet_treasury::GenesisConfig, &mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); diff --git a/substrate/frame/tips/src/weights.rs b/substrate/frame/tips/src/weights.rs index ceee79bd6f07e2a163c0769f318d10290c23c7d8..439c7f976c12a809899728247ff6bdb3152d3f86 100644 --- a/substrate/frame/tips/src/weights.rs +++ b/substrate/frame/tips/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/transaction-payment/rpc/src/lib.rs b/substrate/frame/transaction-payment/rpc/src/lib.rs index efe9f010d139bc4d440b850126bf61bc8b7e6b2d..945156d12a6a4519aafc81ed45f806bb9d617017 100644 --- a/substrate/frame/transaction-payment/rpc/src/lib.rs +++ b/substrate/frame/transaction-payment/rpc/src/lib.rs @@ -17,33 +17,31 @@ //! RPC interface for the transaction payment pallet. -use std::sync::Arc; -use std::convert::TryInto; +pub use self::gen_client::Client as TransactionPaymentClient; use codec::{Codec, Decode}; -use sp_blockchain::HeaderBackend; use jsonrpc_core::{Error as RpcError, ErrorCode, Result}; use jsonrpc_derive::rpc; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, MaybeDisplay}}; +pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; +use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_rpc::number::NumberOrHex; -use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; -pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; -pub use self::gen_client::Client as TransactionPaymentClient; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, MaybeDisplay}, +}; +use std::{convert::TryInto, sync::Arc}; #[rpc] pub trait TransactionPaymentApi { #[rpc(name = "payment_queryInfo")] - fn query_info( - &self, - encoded_xt: Bytes, - at: Option - ) -> Result; + fn query_info(&self, encoded_xt: Bytes, at: Option) -> Result; #[rpc(name = "payment_queryFeeDetails")] fn query_fee_details( &self, encoded_xt: Bytes, - at: Option + at: Option, ) -> Result>; } @@ -77,10 +75,8 @@ impl From for i64 { } } -impl TransactionPaymentApi< - ::Hash, - RuntimeDispatchInfo, -> for TransactionPayment +impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> + for TransactionPayment where Block: BlockT, C: 'static + ProvideRuntimeApi + HeaderBackend, @@ -90,13 +86,12 @@ where fn query_info( &self, encoded_xt: Bytes, - at: Option<::Hash> + at: Option<::Hash>, ) -> Result> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. - self.client.info().best_hash - )); + self.client.info().best_hash)); let encoded_len = encoded_xt.len() as u32; @@ -120,8 +115,7 @@ where let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
- self.client.info().best_hash - )); + self.client.info().best_hash)); let encoded_len = encoded_xt.len() as u32; @@ -136,11 +130,13 @@ where data: Some(format!("{:?}", e).into()), })?; - let try_into_rpc_balance = |value: Balance| value.try_into().map_err(|_| RpcError { - code: ErrorCode::InvalidParams, - message: format!("{} doesn't fit in NumberOrHex representation", value), - data: None, - }); + let try_into_rpc_balance = |value: Balance| { + value.try_into().map_err(|_| RpcError { + code: ErrorCode::InvalidParams, + message: format!("{} doesn't fit in NumberOrHex representation", value), + data: None, + }) + }; Ok(FeeDetails { inclusion_fee: if let Some(inclusion_fee) = fee_details.inclusion_fee { diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 25fce83e69930eed5496386002691d2061ec95e3..882f37dceedf376a96ed939de5abf8576a44988f 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -47,27 +47,27 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_runtime::{ - FixedU128, FixedPointNumber, FixedPointOperand, Perquintill, RuntimeDebug, - transaction_validity::{ - TransactionPriority, ValidTransaction, TransactionValidityError, TransactionValidity, - }, traits::{ - Saturating, SignedExtension, SaturatedConversion, Convert, Dispatchable, - DispatchInfoOf, PostDispatchInfoOf, + Convert, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SaturatedConversion, Saturating, + SignedExtension, }, + transaction_validity::{ + TransactionPriority, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + FixedPointNumber, FixedPointOperand, FixedU128, Perquintill, RuntimeDebug, }; use sp_std::prelude::*; use frame_support::{ + dispatch::DispatchResult, traits::Get, weights::{ - Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Pays, WeightToFeePolynomial, - WeightToFeeCoefficient, DispatchClass, + DispatchClass, DispatchInfo, GetDispatchInfo, Pays, PostDispatchInfo, Weight, + WeightToFeeCoefficient, WeightToFeePolynomial, }, - dispatch::DispatchResult, }; mod payment; @@ -75,7 +75,7 @@ mod types; pub use pallet::*; pub use payment::*; -pub use types::{InclusionFee, FeeDetails, RuntimeDispatchInfo}; +pub use types::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; /// Fee multiplier. pub type Multiplier = FixedU128; @@ -91,11 +91,11 @@ type BalanceOf = <::OnChargeTransaction as OnChargeTransaction MultiplierUpdate for TargetedFeeAdjustment - where T: frame_system::Config, S: Get, V: Get, M: Get, +where + T: frame_system::Config, + S: Get, + V: Get, + M: Get, { fn min() -> Multiplier { M::get() @@ -166,7 +170,11 @@ impl MultiplierUpdate for TargetedFeeAdjustment } impl Convert for TargetedFeeAdjustment - where T: frame_system::Config, S: Get, V: Get, M: Get, +where + T: frame_system::Config, + S: Get, + V: Get, + M: Get, { fn convert(previous: Multiplier) -> Multiplier { // Defensive only. The multiplier in storage should always be at most positive. 
Nonetheless @@ -177,12 +185,13 @@ impl Convert for TargetedFeeAdjustment>::block_weight(); - let normal_block_weight = *current_block_weight - .get(DispatchClass::Normal) - .min(&normal_max_weight); + let normal_block_weight = + *current_block_weight.get(DispatchClass::Normal).min(&normal_max_weight); let s = S::get(); let v = V::get(); @@ -232,9 +241,9 @@ impl Default for Releases { #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -263,7 +272,7 @@ pub mod pallet { #[pallet::extra_constants] impl Pallet { - //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. #[allow(non_snake_case)] /// The polynomial that is applied in order to derive fee from weight. fn WeightToFee() -> Vec>> { @@ -272,16 +281,14 @@ pub mod pallet { } #[pallet::type_value] - pub fn NextFeeMultiplierOnEmpty() -> Multiplier { Multiplier::saturating_from_integer(1) } + pub fn NextFeeMultiplierOnEmpty() -> Multiplier { + Multiplier::saturating_from_integer(1) + } #[pallet::storage] #[pallet::getter(fn next_fee_multiplier)] - pub type NextFeeMultiplier = StorageValue< - _, - Multiplier, - ValueQuery, - NextFeeMultiplierOnEmpty - >; + pub type NextFeeMultiplier = + StorageValue<_, Multiplier, ValueQuery, NextFeeMultiplierOnEmpty>; #[pallet::storage] pub(super) type StorageVersion = StorageValue<_, Releases, ValueQuery>; @@ -318,9 +325,10 @@ pub mod pallet { use sp_std::convert::TryInto; assert!( ::max_value() >= - Multiplier::checked_from_integer( - T::BlockWeights::get().max_block.try_into().unwrap() - ).unwrap(), + Multiplier::checked_from_integer( + T::BlockWeights::get().max_block.try_into().unwrap() + ) + .unwrap(), ); // This is the minimum value of the multiplier. Make sure that if we collapse to this @@ -331,13 +339,13 @@ pub mod pallet { let mut target = T::FeeMultiplierUpdate::target() * T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( "Setting `max_total` for `Normal` dispatch class is not compatible with \ - `transaction-payment` pallet." + `transaction-payment` pallet.", ); // add 1 percent; let addition = target / 100; if addition == 0 { // this is most likely because in a test setup we set everything to (). - return; + return } target += addition; @@ -345,7 +353,9 @@ pub mod pallet { sp_io::TestExternalities::new_empty().execute_with(|| { >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); - assert!(next > min_value, "The minimum bound of the multiplier is too low. When \ + assert!( + next > min_value, + "The minimum bound of the multiplier is too low. When \ block saturation is more than target by 1% and multiplier is minimal then \ the multiplier doesn't increase." ); @@ -354,8 +364,9 @@ pub mod pallet { } } -impl Pallet where - BalanceOf: FixedPointOperand +impl Pallet +where + BalanceOf: FixedPointOperand, { /// Query the data that we know about the fee of a given `call`. /// @@ -398,11 +409,8 @@ impl Pallet where } /// Compute the final fee value for a particular transaction. 
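`compute_fee` and the `compute_fee_raw` path it resolves to, just below, assemble the fee from three parts plus the tip. The decomposition in plain integers (`mult_num / mult_den` standing in for the fixed-point `NextFeeMultiplier`; a sketch, not the pallet's saturating fixed-point math):

```rust
// inclusion_fee = base_fee + len_fee + multiplier * weight_fee
// final_fee     = inclusion_fee + tip
fn final_fee(base: u64, len_fee: u64, weight_fee: u64, mult_num: u64, mult_den: u64, tip: u64) -> u64 {
    let adjusted_weight_fee = weight_fee.saturating_mul(mult_num) / mult_den.max(1);
    base.saturating_add(len_fee)
        .saturating_add(adjusted_weight_fee)
        .saturating_add(tip)
}
```

With the 3/2 multiplier used by the signed-extension tests further down, `final_fee(5, 10, 100, 3, 2, 5)` gives 5 + 10 + 150 + 5, the same arithmetic those tests assert against free balances.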
- pub fn compute_fee( - len: u32, - info: &DispatchInfoOf, - tip: BalanceOf, - ) -> BalanceOf where + pub fn compute_fee(len: u32, info: &DispatchInfoOf, tip: BalanceOf) -> BalanceOf + where T::Call: Dispatchable, { Self::compute_fee_details(len, info, tip).final_fee() @@ -413,7 +421,8 @@ impl Pallet where len: u32, info: &DispatchInfoOf, tip: BalanceOf, - ) -> FeeDetails> where + ) -> FeeDetails> + where T::Call: Dispatchable, { Self::compute_fee_raw(len, info.weight, tip, info.pays_fee, info.class) @@ -428,7 +437,8 @@ impl Pallet where info: &DispatchInfoOf, post_info: &PostDispatchInfoOf, tip: BalanceOf, - ) -> BalanceOf where + ) -> BalanceOf + where T::Call: Dispatchable, { Self::compute_actual_fee_details(len, info, post_info, tip).final_fee() @@ -440,7 +450,8 @@ impl Pallet where info: &DispatchInfoOf, post_info: &PostDispatchInfoOf, tip: BalanceOf, - ) -> FeeDetails> where + ) -> FeeDetails> + where T::Call: Dispatchable, { Self::compute_fee_raw( @@ -477,15 +488,12 @@ impl Pallet where inclusion_fee: Some(InclusionFee { base_fee, len_fee: fixed_len_fee, - adjusted_weight_fee + adjusted_weight_fee, }), - tip + tip, } } else { - FeeDetails { - inclusion_fee: None, - tip - } + FeeDetails { inclusion_fee: None, tip } } } @@ -497,7 +505,8 @@ impl Pallet where } } -impl Convert> for Pallet where +impl Convert> for Pallet +where T: Config, BalanceOf: FixedPointOperand, { @@ -516,7 +525,8 @@ impl Convert> for Pallet where #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment where +impl ChargeTransactionPayment +where T::Call: Dispatchable, BalanceOf: Send + Sync + FixedPointOperand, { @@ -546,8 +556,10 @@ impl ChargeTransactionPayment where let tip = self.0; let fee = Pallet::::compute_fee(len as u32, info, tip); - <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee(who, call, info, fee, tip) - .map(|i| (fee, i)) + <::OnChargeTransaction as OnChargeTransaction>::withdraw_fee( + who, call, info, fee, tip, + ) + .map(|i| (fee, i)) } /// Get an appropriate priority for a transaction with the given length and info. @@ -560,11 +572,16 @@ impl ChargeTransactionPayment where /// and the entire block weight `(1/1)`, its priority is `fee * min(1, 4) = fee * 1`. This means /// that the transaction which consumes more resources (either length or weight) with the same /// `fee` ends up having lower priority. 
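`get_priority`, next, implements the rule its doc comment just described: scale the fee by the scarcer of the two resources the transaction consumes. The same logic isolated from the runtime types (illustrative integer widths):

```rust
// Priority = fee * min(block_weight / tx_weight, block_len / tx_len), so a
// transaction using more of either resource ranks lower at equal fee.
fn priority(fee: u64, weight: u64, max_block_weight: u64, len: u64, max_block_len: u64) -> u64 {
    let weight_saturation = max_block_weight / weight.max(1);
    let len_saturation = max_block_len / len.max(1);
    fee.saturating_mul(weight_saturation.min(len_saturation))
}
```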
- fn get_priority(len: usize, info: &DispatchInfoOf, final_fee: BalanceOf) -> TransactionPriority { + fn get_priority( + len: usize, + info: &DispatchInfoOf, + final_fee: BalanceOf, + ) -> TransactionPriority { let weight_saturation = T::BlockWeights::get().max_block / info.weight.max(1); let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Normal); let len_saturation = max_block_length as u64 / (len as u64).max(1); - let coefficient: BalanceOf = weight_saturation.min(len_saturation).saturated_into::>(); + let coefficient: BalanceOf = + weight_saturation.min(len_saturation).saturated_into::>(); final_fee.saturating_mul(coefficient).saturated_into::() } } @@ -580,7 +597,8 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment { } } -impl SignedExtension for ChargeTransactionPayment where +impl SignedExtension for ChargeTransactionPayment +where BalanceOf: Send + Sync + From + FixedPointOperand, T::Call: Dispatchable, { @@ -596,7 +614,9 @@ impl SignedExtension for ChargeTransactionPayment where // imbalance resulting from withdrawing the fee <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } fn validate( &self, @@ -606,10 +626,7 @@ impl SignedExtension for ChargeTransactionPayment where len: usize, ) -> TransactionValidity { let (fee, _) = self.withdraw_fee(who, call, info, len)?; - Ok(ValidTransaction { - priority: Self::get_priority(len, info, fee), - ..Default::default() - }) + Ok(ValidTransaction { priority: Self::get_priority(len, info, fee), ..Default::default() }) } fn pre_dispatch( @@ -617,7 +634,7 @@ impl SignedExtension for ChargeTransactionPayment where who: &Self::AccountId, call: &Self::Call, info: &DispatchInfoOf, - len: usize + len: usize, ) -> Result { let (_fee, imbalance) = self.withdraw_fee(who, call, info, len)?; Ok((self.0, who.clone(), imbalance)) @@ -631,13 +648,10 @@ impl SignedExtension for ChargeTransactionPayment where _result: &DispatchResult, ) -> Result<(), TransactionValidityError> { let (tip, who, imbalance) = pre; - let actual_fee = Pallet::::compute_actual_fee( - len as u32, - info, - post_info, - tip, - ); - T::OnChargeTransaction::correct_and_deposit_fee(&who, info, post_info, actual_fee, tip, imbalance)?; + let actual_fee = Pallet::::compute_actual_fee(len as u32, info, post_info, tip); + T::OnChargeTransaction::correct_and_deposit_fee( + &who, info, post_info, actual_fee, tip, imbalance, + )?; Ok(()) } } @@ -662,11 +676,11 @@ mod tests { use frame_support::{ assert_noop, assert_ok, parameter_types, + traits::{Currency, Imbalance, OnUnbalanced}, weights::{ - DispatchClass, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Weight, - WeightToFeePolynomial, WeightToFeeCoefficients, WeightToFeeCoefficient, + DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight, + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, }, - traits::{Currency, OnUnbalanced, Imbalance}, }; use frame_system as system; use pallet_balances::Call as BalancesCall; @@ -777,7 +791,7 @@ mod tests { pub struct DealWithFees; impl OnUnbalanced> for DealWithFees { fn on_unbalanceds( - mut fees_then_tips: impl Iterator> + mut fees_then_tips: impl Iterator>, ) { if let Some(fees) = fees_then_tips.next() { FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() += fees.peek()); @@ -799,17 +813,12 @@ mod tests { balance_factor: u64, base_weight: 
u64, byte_fee: u64, - weight_to_fee: u64 + weight_to_fee: u64, } impl Default for ExtBuilder { fn default() -> Self { - Self { - balance_factor: 1, - base_weight: 0, - byte_fee: 1, - weight_to_fee: 1, - } + Self { balance_factor: 1, base_weight: 0, byte_fee: 1, weight_to_fee: 1 } } } @@ -846,12 +855,14 @@ mod tests { (3, 30 * self.balance_factor), (4, 40 * self.balance_factor), (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) + (6, 60 * self.balance_factor), ] } else { vec![] }, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } } @@ -863,24 +874,15 @@ mod tests { } fn post_info_from_weight(w: Weight) -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: Some(w), - pays_fee: Default::default(), - } + PostDispatchInfo { actual_weight: Some(w), pays_fee: Default::default() } } fn post_info_from_pays(p: Pays) -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: None, - pays_fee: p, - } + PostDispatchInfo { actual_weight: None, pays_fee: p } } fn default_post_info() -> PostDispatchInfo { - PostDispatchInfo { - actual_weight: None, - pays_fee: Default::default(), - } + PostDispatchInfo { actual_weight: None, pays_fee: Default::default() } } #[test] @@ -889,37 +891,42 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) - .unwrap(); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(5), &default_post_info(), len, &Ok(())) - ); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0); - - FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 10 + 50); - assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5); - }); + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(0) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .unwrap(); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(5), + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 + 5 + 10); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 0); + + FEE_UNBALANCED_AMOUNT.with(|a| *a.borrow_mut() = 0); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); + assert_eq!(FEE_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5 
+ 10 + 50); + assert_eq!(TIP_UNBALANCED_AMOUNT.with(|a| a.borrow().clone()), 5); + }); } #[test] @@ -928,39 +935,38 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - >::put(Multiplier::saturating_from_rational(3, 2)); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - ); - // 75 (3/2 of the returned 50 units of weight) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); - }); + .execute_with(|| { + let len = 10; + >::put(Multiplier::saturating_from_rational(3, 2)); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + // 75 (3/2 of the returned 50 units of weight) is refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); + }); } #[test] fn signed_extension_transaction_payment_is_bounded() { - ExtBuilder::default() - .balance_factor(1000) - .byte_fee(0) - .build() - .execute_with(|| - { + ExtBuilder::default().balance_factor(1000).byte_fee(0).build().execute_with(|| { // maximum weight possible - assert_ok!( - ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) - ); + assert_ok!(ChargeTransactionPayment::::from(0).pre_dispatch( + &1, + CALL, + &info_from_weight(Weight::max_value()), + 10 + )); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( Balances::free_balance(&1), @@ -975,36 +981,38 @@ mod tests { .base_weight(100) .balance_factor(0) .build() - .execute_with(|| - { - // 1 ain't have a penny. - assert_eq!(Balances::free_balance(1), 0); - - let len = 100; - - // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. - let operational_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::No, - }; - assert_ok!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &operational_transaction , len) - ); - - // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }; - assert_noop!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &free_transaction , len), - TransactionValidityError::Invalid(InvalidTransaction::Payment), - ); - }); + .execute_with(|| { + // 1 ain't have a penny. + assert_eq!(Balances::free_balance(1), 0); + + let len = 100; + + // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
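The free-transaction test just below encodes the validation rule for unfunded accounts: a `Pays::No` dispatch withdraws nothing (tips aside), so an empty account passes, while `Pays::Yes` must be able to cover the computed fee. The rule in isolation (a sketch; the real path goes through `withdraw_fee` and the currency trait):

```rust
#[derive(PartialEq)]
enum Pays {
    Yes,
    No,
}

// Whether an account holding `free_balance` can validate a dispatch that
// would cost `fee` (tip assumed zero, as in the test).
fn can_validate(pays_fee: Pays, free_balance: u64, fee: u64) -> bool {
    match pays_fee {
        Pays::No => true,
        Pays::Yes => free_balance >= fee,
    }
}
```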
+ let operational_transaction = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::No, + }; + assert_ok!(ChargeTransactionPayment::::from(0).validate( + &1, + CALL, + &operational_transaction, + len + )); + + // like a InsecureFreeNormal + let free_transaction = + DispatchInfo { weight: 0, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + assert_noop!( + ChargeTransactionPayment::::from(0).validate( + &1, + CALL, + &free_transaction, + len + ), + TransactionValidityError::Invalid(InvalidTransaction::Payment), + ); + }); } #[test] @@ -1013,25 +1021,22 @@ mod tests { .base_weight(5) .balance_factor(10) .build() - .execute_with(|| - { - // all fees should be x1.5 - >::put(Multiplier::saturating_from_rational(3, 2)); - let len = 10; - - assert_ok!( - ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(3), len) - ); - assert_eq!( - Balances::free_balance(1), - 100 // original + .execute_with(|| { + // all fees should be x1.5 + >::put(Multiplier::saturating_from_rational(3, 2)); + let len = 10; + + assert_ok!(ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(3), len)); + assert_eq!( + Balances::free_balance(1), + 100 // original - 10 // tip - 5 // base - 10 // len - (3 * 3 / 2) // adjusted weight - ); - }) + ); + }) } #[test] @@ -1040,15 +1045,10 @@ mod tests { let origin = 111111; let extra = (); let xt = TestXt::new(call, Some((origin, extra))); - let info = xt.get_dispatch_info(); + let info = xt.get_dispatch_info(); let ext = xt.encode(); let len = ext.len() as u32; - ExtBuilder::default() - .base_weight(5) - .weight_fee(2) - .build() - .execute_with(|| - { + ExtBuilder::default().base_weight(5).weight_fee(2).build().execute_with(|| { // all fees should be x1.5 >::put(Multiplier::saturating_from_rational(3, 2)); @@ -1057,13 +1057,11 @@ mod tests { RuntimeDispatchInfo { weight: info.weight, class: info.class, - partial_fee: - 5 * 2 /* base * weight_fee */ + partial_fee: 5 * 2 /* base * weight_fee */ + len as u64 /* len * 1 */ + info.weight.min(BlockWeights::get().max_block) as u64 * 2 * 3 / 2 /* weight */ }, ); - }); } @@ -1074,37 +1072,36 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Next fee multiplier is zero - assert_eq!(>::get(), Multiplier::one()); - - // Tip only, no fees works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::No, - }; - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); - // No tip, only base fee works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); - // Tip + base fee works - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 69), 169); - // Len (byte fee) + base fee works - assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); - // Weight fee + base fee works - let dispatch_info = DispatchInfo { - weight: 1000, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 1100); - }); + .execute_with(|| { + // Next fee multiplier is zero + assert_eq!(>::get(), Multiplier::one()); + + // Tip only, no fees works + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::No, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 10), 10); + // No tip, only base fee works + let dispatch_info = 
DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + // Tip + base fee works + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 69), 169); + // Len (byte fee) + base fee works + assert_eq!(Pallet::::compute_fee(42, &dispatch_info, 0), 520); + // Weight fee + base fee works + let dispatch_info = DispatchInfo { + weight: 1000, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 1100); + }); } #[test] @@ -1114,30 +1111,29 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Add a next fee multiplier. Fees will be x3/2. - >::put(Multiplier::saturating_from_rational(3, 2)); - // Base fee is unaffected by multiplier - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); - - // Everything works together :) - let dispatch_info = DispatchInfo { - weight: 123, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - // 123 weight, 456 length, 100 base - assert_eq!( - Pallet::::compute_fee(456, &dispatch_info, 789), - 100 + (3 * 123 / 2) + 4560 + 789, - ); - }); + .execute_with(|| { + // Add a next fee multiplier. Fees will be x3/2. + >::put(Multiplier::saturating_from_rational(3, 2)); + // Base fee is unaffected by multiplier + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + + // Everything works together :) + let dispatch_info = DispatchInfo { + weight: 123, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // 123 weight, 456 length, 100 base + assert_eq!( + Pallet::::compute_fee(456, &dispatch_info, 789), + 100 + (3 * 123 / 2) + 4560 + 789, + ); + }); } #[test] @@ -1147,31 +1143,30 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Add a next fee multiplier. All fees will be x1/2. - >::put(Multiplier::saturating_from_rational(1, 2)); - - // Base fee is unaffected by multiplier. - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); - - // Everything works together. - let dispatch_info = DispatchInfo { - weight: 123, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - // 123 weight, 456 length, 100 base - assert_eq!( - Pallet::::compute_fee(456, &dispatch_info, 789), - 100 + (123 / 2) + 4560 + 789, - ); - }); + .execute_with(|| { + // Add a next fee multiplier. All fees will be x1/2. + >::put(Multiplier::saturating_from_rational(1, 2)); + + // Base fee is unaffected by multiplier. + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!(Pallet::::compute_fee(0, &dispatch_info, 0), 100); + + // Everything works together. 
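The expected values in `compute_fee_works` above follow directly from the builder settings (base weight 100, byte fee 10, weight-to-fee 1, multiplier 1). Checked as plain arithmetic:

```rust
fn main() {
    let (base, byte_fee, weight_to_fee) = (100u64, 10u64, 1u64);
    assert_eq!(base + 69, 169); // tip + base fee
    assert_eq!(base + 42 * byte_fee, 520); // len fee + base fee
    assert_eq!(base + 1000 * weight_to_fee, 1100); // weight fee + base fee
}
```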
+ let dispatch_info = DispatchInfo { + weight: 123, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + // 123 weight, 456 length, 100 base + assert_eq!( + Pallet::::compute_fee(456, &dispatch_info, 789), + 100 + (123 / 2) + 4560 + 789, + ); + }); } #[test] @@ -1181,23 +1176,18 @@ mod tests { .byte_fee(10) .balance_factor(0) .build() - .execute_with(|| - { - // Overflow is handled - let dispatch_info = DispatchInfo { - weight: Weight::max_value(), - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - assert_eq!( - Pallet::::compute_fee( - u32::MAX, - &dispatch_info, + .execute_with(|| { + // Overflow is handled + let dispatch_info = DispatchInfo { + weight: Weight::max_value(), + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + assert_eq!( + Pallet::::compute_fee(u32::MAX, &dispatch_info, u64::MAX), u64::MAX - ), - u64::MAX - ); - }); + ); + }); } #[test] @@ -1206,30 +1196,34 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - // So events are emitted - System::set_block_number(10); - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert_ok!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2))); - assert_eq!(Balances::free_balance(2), 0); - - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - ); - assert_eq!(Balances::free_balance(2), 0); - // Transfer Event - System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer(2, 3, 80))); - // Killed Event - System::assert_has_event(Event::System(system::Event::KilledAccount(2))); - }); + .execute_with(|| { + // So events are emitted + System::set_block_number(10); + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert_ok!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2))); + assert_eq!(Balances::free_balance(2), 0); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 0); + // Transfer Event + System::assert_has_event(Event::Balances(pallet_balances::Event::Transfer( + 2, 3, 80, + ))); + // Killed Event + System::assert_has_event(Event::System(system::Event::KilledAccount(2))); + }); } #[test] @@ -1238,20 +1232,22 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(101), len, &Ok(())) - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - }); + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert_ok!(ChargeTransactionPayment::::post_dispatch( + 
pre, + &info_from_weight(100), + &post_info_from_weight(101), + len, + &Ok(()) + )); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + }); } #[test] @@ -1260,29 +1256,28 @@ mod tests { .balance_factor(10) .base_weight(5) .build() - .execute_with(|| - { - // So events are emitted - System::set_block_number(10); - let len = 10; - let dispatch_info = DispatchInfo { - weight: 100, - pays_fee: Pays::No, - class: DispatchClass::Normal, - }; - let user = 69; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&user, CALL, &dispatch_info, len) - .unwrap(); - assert_eq!(Balances::total_balance(&user), 0); - assert_ok!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &dispatch_info, &default_post_info(), len, &Ok(())) - ); - assert_eq!(Balances::total_balance(&user), 0); - // No events for such a scenario - assert_eq!(System::events().len(), 0); - }); + .execute_with(|| { + // So events are emitted + System::set_block_number(10); + let len = 10; + let dispatch_info = + DispatchInfo { weight: 100, pays_fee: Pays::No, class: DispatchClass::Normal }; + let user = 69; + let pre = ChargeTransactionPayment::::from(0) + .pre_dispatch(&user, CALL, &dispatch_info, len) + .unwrap(); + assert_eq!(Balances::total_balance(&user), 0); + assert_ok!(ChargeTransactionPayment::::post_dispatch( + pre, + &dispatch_info, + &default_post_info(), + len, + &Ok(()) + )); + assert_eq!(Balances::total_balance(&user), 0); + // No events for such a scenario + assert_eq!(System::events().len(), 0); + }); } #[test] @@ -1291,32 +1286,36 @@ mod tests { .balance_factor(10) .base_weight(7) .build() - .execute_with(|| - { - let info = info_from_weight(100); - let post_info = post_info_from_weight(33); - let prev_balance = Balances::free_balance(2); - let len = 10; - let tip = 5; - - >::put(Multiplier::saturating_from_rational(5, 4)); - - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) - .unwrap(); - - ChargeTransactionPayment:: - ::post_dispatch(pre, &info, &post_info, len, &Ok(())) + .execute_with(|| { + let info = info_from_weight(100); + let post_info = post_info_from_weight(33); + let prev_balance = Balances::free_balance(2); + let len = 10; + let tip = 5; + + >::put(Multiplier::saturating_from_rational(5, 4)); + + let pre = ChargeTransactionPayment::::from(tip) + .pre_dispatch(&2, CALL, &info, len) + .unwrap(); + + ChargeTransactionPayment::::post_dispatch( + pre, + &info, + &post_info, + len, + &Ok(()), + ) .unwrap(); - let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Pallet:: - ::compute_actual_fee(len as u32, &info, &post_info, tip); + let refund_based_fee = prev_balance - Balances::free_balance(2); + let actual_fee = + Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); - // 33 weight, 10 length, 7 base, 5 tip - assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); - assert_eq!(refund_based_fee, actual_fee); - }); + // 33 weight, 10 length, 7 base, 5 tip + assert_eq!(actual_fee, 7 + 10 + (33 * 5 / 4) + 5); + assert_eq!(refund_based_fee, actual_fee); + }); } #[test] @@ -1325,31 +1324,35 @@ mod tests { .balance_factor(10) .base_weight(7) .build() - .execute_with(|| - { - let info = info_from_weight(100); - let post_info = post_info_from_pays(Pays::No); - let prev_balance = Balances::free_balance(2); - let len = 10; - let tip = 5; - - >::put(Multiplier::saturating_from_rational(5, 4)); - - let pre = ChargeTransactionPayment::::from(tip) - .pre_dispatch(&2, CALL, &info, len) + .execute_with(|| { + let info = 
info_from_weight(100); + let post_info = post_info_from_pays(Pays::No); + let prev_balance = Balances::free_balance(2); + let len = 10; + let tip = 5; + + >::put(Multiplier::saturating_from_rational(5, 4)); + + let pre = ChargeTransactionPayment::::from(tip) + .pre_dispatch(&2, CALL, &info, len) + .unwrap(); + + ChargeTransactionPayment::::post_dispatch( + pre, + &info, + &post_info, + len, + &Ok(()), + ) .unwrap(); - ChargeTransactionPayment:: - ::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .unwrap(); - - let refund_based_fee = prev_balance - Balances::free_balance(2); - let actual_fee = Pallet:: - ::compute_actual_fee(len as u32, &info, &post_info, tip); + let refund_based_fee = prev_balance - Balances::free_balance(2); + let actual_fee = + Pallet::::compute_actual_fee(len as u32, &info, &post_info, tip); - // Only 5 tip is paid - assert_eq!(actual_fee, 5); - assert_eq!(refund_based_fee, actual_fee); - }); + // Only 5 tip is paid + assert_eq!(actual_fee, 5); + assert_eq!(refund_based_fee, actual_fee); + }); } } diff --git a/substrate/frame/transaction-payment/src/payment.rs b/substrate/frame/transaction-payment/src/payment.rs index 376cd77ce3f822b257604f2c1977f11a2cfe3557..832e4d5359a1c612990bdd1ddd7fb7d2326d68e6 100644 --- a/substrate/frame/transaction-payment/src/payment.rs +++ b/substrate/frame/transaction-payment/src/payment.rs @@ -1,10 +1,12 @@ -///! Traits and default implementation for paying transaction fees. - +/// ! Traits and default implementation for paying transaction fees. use crate::Config; use codec::FullCodec; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, PostDispatchInfoOf, Saturating, Zero}, + traits::{ + AtLeast32BitUnsigned, DispatchInfoOf, MaybeSerializeDeserialize, PostDispatchInfoOf, + Saturating, Zero, + }, transaction_validity::InvalidTransaction, }; use sp_std::{fmt::Debug, marker::PhantomData}; @@ -20,7 +22,12 @@ type NegativeImbalanceOf = /// Handle withdrawing, refunding and depositing of transaction fees. pub trait OnChargeTransaction { /// The underlying integer type in which fees are calculated. - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; + type Balance: AtLeast32BitUnsigned + + FullCodec + + Copy + + MaybeSerializeDeserialize + + Debug + + Default; type LiquidityInfo: Default; /// Before the transaction is executed the payment of the transaction fees @@ -67,10 +74,14 @@ where T: Config, T::TransactionByteFee: Get<::AccountId>>::Balance>, C: Currency<::AccountId>, - C::PositiveImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::NegativeImbalance>, - C::NegativeImbalance: - Imbalance<::AccountId>>::Balance, Opposite = C::PositiveImbalance>, + C::PositiveImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::NegativeImbalance, + >, + C::NegativeImbalance: Imbalance< + ::AccountId>>::Balance, + Opposite = C::PositiveImbalance, + >, OU: OnUnbalanced>, { type LiquidityInfo = Option>; @@ -87,7 +98,7 @@ where tip: Self::Balance, ) -> Result { if fee.is_zero() { - return Ok(None); + return Ok(None) } let withdraw_reason = if tip.is_zero() { @@ -121,8 +132,8 @@ where // refund to the the account that paid the fees. If this fails, the // account might have dropped below the existential balance. In // that case we don't refund anything. 
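The refund step below settles the difference between what `withdraw_fee` took at `pre_dispatch` and the fee recomputed from the actual weight; if the account was reaped in between, `deposit_into_existing` fails and the refund is forfeited rather than resurrecting the account. The flow as a sketch, with `deposit` standing in for the currency call:

```rust
// `paid` was withdrawn up front; `actual_fee` is known only after dispatch.
fn settle(paid: u64, actual_fee: u64, deposit: impl FnOnce(u64) -> Result<u64, ()>) -> u64 {
    let refund = paid.saturating_sub(actual_fee);
    let refunded = deposit(refund).unwrap_or(0); // failed refunds are kept
    paid - refunded // what ultimately remains withdrawn as the fee
}
```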
- let refund_imbalance = - C::deposit_into_existing(&who, refund_amount).unwrap_or_else(|_| C::PositiveImbalance::zero()); + let refund_imbalance = C::deposit_into_existing(&who, refund_amount) + .unwrap_or_else(|_| C::PositiveImbalance::zero()); // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid = paid .offset(refund_imbalance) diff --git a/substrate/frame/transaction-payment/src/types.rs b/substrate/frame/transaction-payment/src/types.rs index b5d46a9167a759303750a0532d8ee8c72f00fc12..345bd39718a733461fa9424e40d2bb2081aa33ec 100644 --- a/substrate/frame/transaction-payment/src/types.rs +++ b/substrate/frame/transaction-payment/src/types.rs @@ -17,14 +17,14 @@ //! Types for transaction-payment RPC. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; use sp_std::prelude::*; -use frame_support::weights::{Weight, DispatchClass}; +use frame_support::weights::{DispatchClass, Weight}; /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. #[derive(Encode, Decode, Clone, Eq, PartialEq)] @@ -80,7 +80,11 @@ impl FeeDetails { /// final_fee = inclusion_fee + tip; /// ``` pub fn final_fee(&self) -> Balance { - self.inclusion_fee.as_ref().map(|i| i.inclusion_fee()).unwrap_or_else(|| Zero::zero()).saturating_add(self.tip) + self.inclusion_fee + .as_ref() + .map(|i| i.inclusion_fee()) + .unwrap_or_else(|| Zero::zero()) + .saturating_add(self.tip) } } @@ -105,13 +109,18 @@ pub struct RuntimeDispatchInfo { #[cfg(feature = "std")] mod serde_balance { - use serde::{Deserialize, Serializer, Deserializer}; + use serde::{Deserialize, Deserializer, Serializer}; - pub fn serialize(t: &T, serializer: S) -> Result { + pub fn serialize( + t: &T, + serializer: S, + ) -> Result { serializer.serialize_str(&t.to_string()) } - pub fn deserialize<'de, D: Deserializer<'de>, T: std::str::FromStr>(deserializer: D) -> Result { + pub fn deserialize<'de, D: Deserializer<'de>, T: std::str::FromStr>( + deserializer: D, + ) -> Result { let s = String::deserialize(deserializer)?; s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) } diff --git a/substrate/frame/transaction-storage/src/benchmarking.rs b/substrate/frame/transaction-storage/src/benchmarking.rs index ffb4d23de119f14175ad69663ac9632667cf8ee8..64081c3202c050408110d659cb9d27e5d2030e82 100644 --- a/substrate/frame/transaction-storage/src/benchmarking.rs +++ b/substrate/frame/transaction-storage/src/benchmarking.rs @@ -19,17 +19,18 @@ #![cfg(feature = "runtime-benchmarks")] -use sp_std::*; use super::*; -use sp_runtime::traits::{Zero, One, Bounded}; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_support::traits::{Currency, OnFinalize, OnInitialize}; +use frame_system::{EventRecord, Pallet as System, RawOrigin}; +use sp_runtime::traits::{Bounded, One, Zero}; +use sp_std::*; use sp_transaction_storage_proof::TransactionStorageProof; -use frame_system::{RawOrigin, Pallet as System, EventRecord}; -use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; -use frame_support::{traits::{Currency, OnFinalize, OnInitialize}}; use crate::Pallet as TransactionStorage; -const PROOF: &[u8] = &hex_literal::hex!(" +const PROOF: &[u8] = &hex_literal::hex!( + " 
0104000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 @@ -75,9 +76,11 @@ const PROOF: &[u8] = &hex_literal::hex!(" 0c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b89297 7acaac84d530bd188544c5f9b80b4f23ac50c8e67d9b280f2b31a5707d52b892977acaac84d530bd188544c5f9b104401 0000 -"); +" +); -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; fn assert_last_event(generic_event: ::Event) { let events = System::::events(); @@ -90,7 +93,9 @@ pub fn run_to_block(n: T::BlockNumber) { while frame_system::Pallet::::block_number() < n { crate::Pallet::::on_finalize(frame_system::Pallet::::block_number()); frame_system::Pallet::::on_finalize(frame_system::Pallet::::block_number()); - frame_system::Pallet::::set_block_number(frame_system::Pallet::::block_number() + One::one()); + frame_system::Pallet::::set_block_number( + frame_system::Pallet::::block_number() + One::one(), + ); frame_system::Pallet::::on_initialize(frame_system::Pallet::::block_number()); crate::Pallet::::on_initialize(frame_system::Pallet::::block_number()); } @@ -140,8 +145,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - TransactionStorage, - crate::mock::new_test_ext(), - crate::mock::Test, -); +impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/substrate/frame/transaction-storage/src/lib.rs b/substrate/frame/transaction-storage/src/lib.rs index 97dfd76fe677367e36a6b0c136eea920c6996081..3964f42998b4835c0da1c44f683546a40acaf2f6 100644 --- a/substrate/frame/transaction-storage/src/lib.rs +++ b/substrate/frame/transaction-storage/src/lib.rs @@ -28,24 +28,24 @@ mod mock; #[cfg(test)] mod tests; +use codec::{Decode, Encode}; use frame_support::{ - traits::{ReservableCurrency, Currency, OnUnbalanced}, dispatch::{Dispatchable, GetDispatchInfo}, + traits::{Currency, OnUnbalanced, ReservableCurrency}, }; -use sp_std::prelude::*; -use sp_std::{result}; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Saturating, BlakeTwo256, Hash, Zero, One}; +use sp_runtime::traits::{BlakeTwo256, Hash, One, Saturating, Zero}; +use sp_std::{prelude::*, result}; use sp_transaction_storage_proof::{ - TransactionStorageProof, InherentError, - random_chunk, encode_index, - CHUNK_SIZE, INHERENT_IDENTIFIER, DEFAULT_STORAGE_PERIOD, + encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE, + DEFAULT_STORAGE_PERIOD, INHERENT_IDENTIFIER, }; /// A type alias for the balance type from this pallet's point of view. -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>> - ::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; // Re-export pallet items so that they can be accessed from the crate namespace. pub use pallet::*; @@ -76,16 +76,19 @@ fn num_chunks(bytes: u32) -> u32 { #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. 
type Event: From> + IsType<::Event>; /// A dispatchable call. - type Call: Parameter + Dispatchable + GetDispatchInfo + From>; + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From>; /// The currency trait. type Currency: ReservableCurrency; /// Handler for the unbalanced decrease when fees are burned. @@ -145,8 +148,7 @@ pub mod pallet { fn on_finalize(n: T::BlockNumber) { assert!( - >::take() - || { + >::take() || { // Proof is not required for early or empty blocks. let number = >::block_number(); let period = >::get(); @@ -174,12 +176,12 @@ pub mod pallet { /// Additionally contains a DB write. /// # #[pallet::weight(T::WeightInfo::store(data.len() as u32))] - pub fn store( - origin: OriginFor, - data: Vec, - ) -> DispatchResult { + pub fn store(origin: OriginFor, data: Vec) -> DispatchResult { ensure!(data.len() > 0, Error::::EmptyTransaction); - ensure!(data.len() <= MaxTransactionSize::::get() as usize, Error::::TransactionTooLarge); + ensure!( + data.len() <= MaxTransactionSize::::get() as usize, + Error::::TransactionTooLarge + ); let sender = ensure_signed(origin)?; Self::apply_fee(sender, data.len() as u32)?; @@ -189,8 +191,8 @@ pub mod pallet { let root = sp_io::trie::blake2_256_ordered_root(chunks); let content_hash = sp_io::hashing::blake2_256(&data); - let extrinsic_index = >::extrinsic_index().ok_or_else( - || Error::::BadContext)?; + let extrinsic_index = >::extrinsic_index() + .ok_or_else(|| Error::::BadContext)?; sp_io::transaction_index::index(extrinsic_index, data.len() as u32, content_hash); let mut index = 0; @@ -277,11 +279,14 @@ pub mod pallet { let selected_chunk_index = random_chunk(parent_hash.as_ref(), total_chunks); let (info, chunk_index) = match >::get(target_number) { Some(infos) => { - let index = match infos.binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) { + let index = match infos + .binary_search_by_key(&selected_chunk_index, |info| info.block_chunks) + { Ok(index) => index, Err(index) => index, }; - let info = infos.get(index).ok_or_else(|| Error::::MissingStateData)?.clone(); + let info = + infos.get(index).ok_or_else(|| Error::::MissingStateData)?.clone(); let chunks = num_chunks(info.size); let prev_chunks = info.block_chunks - chunks; (info, selected_chunk_index - prev_chunks) @@ -317,23 +322,13 @@ pub mod pallet { /// Collection of transaction metadata by block number. #[pallet::storage] #[pallet::getter(fn transaction_roots)] - pub(super) type Transactions = StorageMap< - _, - Blake2_128Concat, - T::BlockNumber, - Vec, - OptionQuery, - >; + pub(super) type Transactions = + StorageMap<_, Blake2_128Concat, T::BlockNumber, Vec, OptionQuery>; /// Count indexed chunks for each block. #[pallet::storage] - pub(super) type ChunkCount = StorageMap< - _, - Blake2_128Concat, - T::BlockNumber, - u32, - ValueQuery, - >; + pub(super) type ChunkCount = + StorageMap<_, Blake2_128Concat, T::BlockNumber, u32, ValueQuery>; #[pallet::storage] #[pallet::getter(fn byte_fee)] @@ -362,13 +357,13 @@ pub mod pallet { // Intermediates #[pallet::storage] - pub(super) type BlockTransactions = StorageValue<_, Vec, ValueQuery>; + pub(super) type BlockTransactions = + StorageValue<_, Vec, ValueQuery>; /// Was the proof checked in this block? 
#[pallet::storage] pub(super) type ProofChecked<T: Config> = StorageValue<_, bool, ValueQuery>; - #[pallet::genesis_config] pub struct GenesisConfig<T: Config> { pub byte_fee: BalanceOf<T>, @@ -409,11 +404,16 @@ pub mod pallet { const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; fn create_inherent(data: &InherentData) -> Option<Self::Call> { - let proof = data.get_data::<TransactionStorageProof>(&Self::INHERENT_IDENTIFIER).unwrap_or(None); + let proof = data + .get_data::<TransactionStorageProof>(&Self::INHERENT_IDENTIFIER) + .unwrap_or(None); proof.map(Call::check_proof) } - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { + fn check_inherent( + _call: &Self::Call, + _data: &InherentData, + ) -> result::Result<(), Self::Error> { Ok(()) } diff --git a/substrate/frame/transaction-storage/src/mock.rs b/substrate/frame/transaction-storage/src/mock.rs index 344d7b736953394ffb48648efa65598ebac536a8..17a5d8097b671cce5abe71406798536cbc69f96d 100644 --- a/substrate/frame/transaction-storage/src/mock.rs +++ b/substrate/frame/transaction-storage/src/mock.rs @@ -19,13 +19,16 @@ use crate as pallet_transaction_storage; use crate::TransactionStorageProof; -use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header, BuildStorage}; use frame_support::{ parameter_types, - traits::{OnInitialize, OnFinalize}, + traits::{OnFinalize, OnInitialize}, +}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + BuildStorage, }; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; pub type Block = frame_system::mocking::MockBlock<Test>; @@ -104,7 +107,7 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let t = GenesisConfig { system: Default::default(), balances: pallet_balances::GenesisConfig::<Test> { - balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)] + balances: vec![(1, 1000000000), (2, 100), (3, 100), (4, 100)], }, transaction_storage: pallet_transaction_storage::GenesisConfig::<Test> { storage_period: 10, @@ -113,7 +116,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { max_block_transactions: crate::DEFAULT_MAX_BLOCK_TRANSACTIONS, max_transaction_size: crate::DEFAULT_MAX_TRANSACTION_SIZE, }, - }.build_storage().unwrap(); + } + .build_storage() + .unwrap(); t.into() } diff --git a/substrate/frame/transaction-storage/src/tests.rs b/substrate/frame/transaction-storage/src/tests.rs index 50594f1bce9dca32c16f192fc4dcd76c8fe83c30..c443f51ffb50f6a46567621b608a9b383eb4af61 100644 --- a/substrate/frame/transaction-storage/src/tests.rs +++ b/substrate/frame/transaction-storage/src/tests.rs @@ -17,10 +17,9 @@ //! Tests for transaction-storage pallet.
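The tests that follow exercise the pallet's core invariant: data indexed at block N must be re-proven when block N + StoragePeriod is authored, and the chunk to prove is derived deterministically from the parent hash so every validator checks the same one. A rough sketch of that selection step, close to what random_chunk in sp-transaction-storage-proof does (the exact byte handling there may differ; names here are illustrative, not the crate's API):

    // Pick which chunk of all indexed data must be proven in this block.
    // total_chunks > 0 holds because callers skip early and empty blocks
    // (see the on_finalize assertion above).
    fn sketch_random_chunk(parent_hash: &[u8], total_chunks: u32) -> u32 {
        let digest = sp_io::hashing::blake2_256(parent_hash);
        let mut buf = [0u8; 8];
        buf.copy_from_slice(&digest[..8]);
        (u64::from_be_bytes(buf) % total_chunks as u64) as u32
    }

The block whose transactions are being re-proven is simply the current block number minus the configured storage period, which is why the tests supply proofs at blocks 11 and 16 for data stored at blocks 1 and 6.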
-use super::*; +use super::{Pallet as TransactionStorage, *}; use crate::mock::*; -use super::Pallet as TransactionStorage; -use frame_support::{assert_ok, assert_noop}; +use frame_support::{assert_noop, assert_ok}; use frame_system::RawOrigin; use sp_transaction_storage_proof::registration::build_proof; @@ -41,9 +40,12 @@ fn discards_data() { )); let proof_provider = || { let block_num = >::block_number(); - if block_num == 11 { + if block_num == 11 { let parent_hash = >::parent_hash(); - Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]]).unwrap()) + Some( + build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000], vec![0u8; 2000]]) + .unwrap(), + ) } else { None } @@ -64,15 +66,16 @@ fn burns_fee() { new_test_ext().execute_with(|| { run_to_block(1, || None); let caller = 1; - assert_noop!(TransactionStorage::::store( + assert_noop!( + TransactionStorage::::store( RawOrigin::Signed(5).into(), vec![0u8; 2000 as usize] ), Error::::InsufficientFunds, ); assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), - vec![0u8; 2000 as usize] + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000 as usize] )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 2000 * 2 - 200); }); @@ -89,34 +92,23 @@ fn checks_proof() { )); run_to_block(10, || None); let parent_hash = >::parent_hash(); - let proof = build_proof( - parent_hash.as_ref(), - vec![vec![0u8; MAX_DATA_SIZE as usize]] - ).unwrap(); - assert_noop!(TransactionStorage::::check_proof( - Origin::none(), - proof, - ), + let proof = + build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); + assert_noop!( + TransactionStorage::::check_proof(Origin::none(), proof,), Error::::UnexpectedProof, ); run_to_block(11, || None); let parent_hash = >::parent_hash(); - let invalid_proof = build_proof( - parent_hash.as_ref(), - vec![vec![0u8; 1000]] - ).unwrap(); - assert_noop!(TransactionStorage::::check_proof( - Origin::none(), - invalid_proof, - ), - Error::::InvalidProof, + let invalid_proof = build_proof(parent_hash.as_ref(), vec![vec![0u8; 1000]]).unwrap(); + assert_noop!( + TransactionStorage::::check_proof(Origin::none(), invalid_proof,), + Error::::InvalidProof, ); - let proof = build_proof( - parent_hash.as_ref(), - vec![vec![0u8; MAX_DATA_SIZE as usize]] - ).unwrap(); + let proof = + build_proof(parent_hash.as_ref(), vec![vec![0u8; MAX_DATA_SIZE as usize]]).unwrap(); assert_ok!(TransactionStorage::::check_proof(Origin::none(), proof)); }); } @@ -127,20 +119,20 @@ fn renews_data() { run_to_block(1, || None); let caller = 1; assert_ok!(TransactionStorage::::store( - RawOrigin::Signed(caller.clone()).into(), - vec![0u8; 2000] + RawOrigin::Signed(caller.clone()).into(), + vec![0u8; 2000] )); let info = BlockTransactions::::get().last().unwrap().clone(); run_to_block(6, || None); assert_ok!(TransactionStorage::::renew( - RawOrigin::Signed(caller.clone()).into(), - 1, // block - 0, // transaction + RawOrigin::Signed(caller.clone()).into(), + 1, // block + 0, // transaction )); assert_eq!(Balances::free_balance(1), 1_000_000_000 - 4000 * 2 - 200 * 2); let proof_provider = || { let block_num = >::block_number(); - if block_num == 11 || block_num == 16 { + if block_num == 11 || block_num == 16 { let parent_hash = >::parent_hash(); Some(build_proof(parent_hash.as_ref(), vec![vec![0u8; 2000]]).unwrap()) } else { @@ -154,4 +146,3 @@ fn renews_data() { assert!(Transactions::::get(6).is_none()); }); } - diff --git a/substrate/frame/transaction-storage/src/weights.rs 
b/substrate/frame/transaction-storage/src/weights.rs index 46fc664d977c61cdd9b170a99b6579a6e1fd6e66..82259e60d874f1d90343cb82c7192760fbde883f 100644 --- a/substrate/frame/transaction-storage/src/weights.rs +++ b/substrate/frame/transaction-storage/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/treasury/src/benchmarking.rs b/substrate/frame/treasury/src/benchmarking.rs index cc5db8ce94c723fd01e95cd8e8d9b24d10e4cb05..98fed2c6a536a1210c310c0f370d774710f7ba72 100644 --- a/substrate/frame/treasury/src/benchmarking.rs +++ b/substrate/frame/treasury/src/benchmarking.rs @@ -19,20 +19,18 @@ #![cfg(feature = "runtime-benchmarks")] -use super::{*, Pallet as Treasury}; +use super::{Pallet as Treasury, *}; -use frame_benchmarking::{benchmarks_instance_pallet, account, impl_benchmark_test_suite}; -use frame_support::{traits::OnInitialize, ensure}; +use frame_benchmarking::{account, benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; const SEED: u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal, I: 'static>(u: u32) -> ( - T::AccountId, - BalanceOf, - ::Source, -) { +fn setup_proposal, I: 'static>( + u: u32, +) -> (T::AccountId, BalanceOf, ::Source) { let caller = account("caller", u, SEED); let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100u32.into()); let _ = T::Currency::make_free_balance_be(&caller, value); @@ -43,13 +41,9 @@ fn setup_proposal, I: 'static>(u: u32) -> ( // Create proposals that are approved for use in `on_initialize`. fn create_approved_proposals, I: 'static>(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { + for i in 0..n { let (caller, value, lookup) = setup_proposal::(i); - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - lookup - )?; + Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; let proposal_id = >::get() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; } @@ -102,8 +96,4 @@ benchmarks_instance_pallet! 
{ } } -impl_benchmark_test_suite!( - Treasury, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/treasury/src/lib.rs b/substrate/frame/treasury/src/lib.rs index b6b9097e3a366d76aa69bde1eeeb3219d403534e..207d51905af53a26d8fce1f4a492dfa41ffb8a60 100644 --- a/substrate/frame/treasury/src/lib.rs +++ b/substrate/frame/treasury/src/lib.rs @@ -57,37 +57,40 @@ #![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; #[cfg(test)] mod tests; -mod benchmarking; pub mod weights; -use codec::{Encode, Decode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; -use sp_std::prelude::*; use sp_runtime::{ + traits::{AccountIdConversion, Saturating, StaticLookup, Zero}, Permill, RuntimeDebug, - traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating - } }; +use sp_std::prelude::*; -use frame_support::{print, PalletId}; -use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::KeepAlive, - ReservableCurrency, WithdrawReasons +use frame_support::{ + print, + traits::{ + Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced, + ReservableCurrency, WithdrawReasons, + }, + weights::Weight, + PalletId, }; -use frame_support::weights::Weight; -pub use weights::WeightInfo; pub use pallet::*; +pub use weights::WeightInfo; pub type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -pub type PositiveImbalanceOf = - <>::Currency as Currency<::AccountId>>::PositiveImbalance; -pub type NegativeImbalanceOf = - <>::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type PositiveImbalanceOf = <>::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +pub type NegativeImbalanceOf = <>::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. /// There is an expectation that the implementer of this trait will correctly manage @@ -130,9 +133,9 @@ pub struct Proposal { #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -204,17 +207,14 @@ pub mod pallet { Twox64Concat, ProposalIndex, Proposal>, - OptionQuery + OptionQuery, >; /// Proposal indices that have been approved but not yet awarded. #[pallet::storage] #[pallet::getter(fn approvals)] - pub type Approvals, I: 'static = ()> = StorageValue< - _, - BoundedVec, - ValueQuery - >; + pub type Approvals, I: 'static = ()> = + StorageValue<_, BoundedVec, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig; @@ -229,10 +229,12 @@ pub mod pallet { #[cfg(feature = "std")] impl GenesisConfig { /// Direct implementation of `GenesisBuild::assimilate_storage`. - #[deprecated(note = "use ` as GenesisBuild>::assimilate_storage` instead")] + #[deprecated( + note = "use ` as GenesisBuild>::assimilate_storage` instead" + )] pub fn assimilate_storage, I: 'static>( &self, - storage: &mut sp_runtime::Storage + storage: &mut sp_runtime::Storage, ) -> Result<(), String> { >::assimilate_storage(self, storage) } @@ -272,8 +274,8 @@ pub mod pallet { } /// Old name generated by `decl_event`. - #[deprecated(note = "use `Event` instead")] - pub type RawEvent = Event; + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; /// Error for the treasury pallet. 
#[pallet::error] @@ -320,7 +322,7 @@ pub mod pallet { pub fn propose_spend( origin: OriginFor, #[pallet::compact] value: BalanceOf, - beneficiary: ::Source + beneficiary: ::Source, ) -> DispatchResult { let proposer = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; @@ -349,11 +351,12 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::reject_proposal(), DispatchClass::Operational))] pub fn reject_proposal( origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex + #[pallet::compact] proposal_id: ProposalIndex, ) -> DispatchResult { T::RejectOrigin::ensure_origin(origin)?; - let proposal = >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; + let proposal = + >::take(&proposal_id).ok_or(Error::::InvalidIndex)?; let value = proposal.bond; let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; T::OnSlash::on_unbalanced(imbalance); @@ -375,12 +378,13 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational))] pub fn approve_proposal( origin: OriginFor, - #[pallet::compact] proposal_id: ProposalIndex + #[pallet::compact] proposal_id: ProposalIndex, ) -> DispatchResult { T::ApproveOrigin::ensure_origin(origin)?; ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::try_append(proposal_id).map_err(|_| Error::::TooManyApprovals)?; + Approvals::::try_append(proposal_id) + .map_err(|_| Error::::TooManyApprovals)?; Ok(()) } } @@ -444,7 +448,12 @@ impl, I: 'static> Pallet { total_weight += T::WeightInfo::on_initialize_proposals(proposals_len); // Call Runtime hooks to external pallet using treasury to compute spend funds. - T::SpendFunds::spend_funds( &mut budget_remaining, &mut imbalance, &mut total_weight, &mut missed_any); + T::SpendFunds::spend_funds( + &mut budget_remaining, + &mut imbalance, + &mut total_weight, + &mut missed_any, + ); if !missed_any { // burn some proportion of the remaining budget if we run a surplus. @@ -461,12 +470,9 @@ impl, I: 'static> Pallet { // proof: budget_remaining is account free balance minus ED; // Thus we can't spend more than account free balance minus ED; // Thus account is kept alive; qed; - if let Err(problem) = T::Currency::settle( - &account_id, - imbalance, - WithdrawReasons::TRANSFER, - KeepAlive - ) { + if let Err(problem) = + T::Currency::settle(&account_id, imbalance, WithdrawReasons::TRANSFER, KeepAlive) + { print("Inconsistent state - couldn't settle imbalance for funds spent by treasury"); // Nothing else to do here. drop(problem); diff --git a/substrate/frame/treasury/src/tests.rs b/substrate/frame/treasury/src/tests.rs index a59491e1f6e9d7ff746744106e5da151c9aca79d..cf341d5ad80f543999eaeb07160c107f25d0bc3d 100644 --- a/substrate/frame/treasury/src/tests.rs +++ b/substrate/frame/treasury/src/tests.rs @@ -28,12 +28,12 @@ use sp_runtime::{ }; use frame_support::{ - assert_noop, assert_ok, parameter_types, - traits::OnInitialize, PalletId, pallet_prelude::GenesisBuild, + assert_noop, assert_ok, pallet_prelude::GenesisBuild, parameter_types, traits::OnInitialize, + PalletId, }; -use crate as treasury; use super::*; +use crate as treasury; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -119,7 +119,7 @@ impl Config for Test { type ProposalBondMinimum = ProposalBondMinimum; type SpendPeriod = SpendPeriod; type Burn = Burn; - type BurnDestination = (); // Just gets burned. + type BurnDestination = (); // Just gets burned. 
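// With `()` as `BurnDestination`, the `NegativeImbalance` created by the
// burn is dropped rather than credited anywhere, so burned treasury funds
// simply reduce total issuance; any other `OnUnbalanced` handler could be
// plugged in here to redirect them instead.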
type WeightInfo = (); type SpendFunds = (); type MaxApprovals = MaxApprovals; @@ -127,10 +127,12 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized at ED. balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); t.into() } @@ -320,9 +322,9 @@ fn treasury_account_doesnt_get_deleted() { #[test] fn inexistent_account_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(0, 100), (1, 99), (2, 1)] } + .assimilate_storage(&mut t) + .unwrap(); // Treasury genesis config is not build thus treasury account does not exist let mut t: sp_io::TestExternalities = t.into(); @@ -353,10 +355,12 @@ fn inexistent_account_works() { fn genesis_funding_works() { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let initial_funding = 100; - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { // Total issuance will be 200 with treasury account initialized with 100. balances: vec![(0, 100), (Treasury::account_id(), initial_funding)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); let mut t: sp_io::TestExternalities = t.into(); @@ -372,13 +376,16 @@ fn max_approvals_limited() { Balances::make_free_balance_be(&Treasury::account_id(), u64::MAX); Balances::make_free_balance_be(&0, u64::MAX); - for _ in 0 .. MaxApprovals::get() { + for _ in 0..MaxApprovals::get() { assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); } // One too many will fail assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::TooManyApprovals); + assert_noop!( + Treasury::approve_proposal(Origin::root(), 0), + Error::::TooManyApprovals + ); }); } diff --git a/substrate/frame/treasury/src/weights.rs b/substrate/frame/treasury/src/weights.rs index d293399e7b480297cde82a5f653b6be29ff54198..234d71e3add2ed41d5472e0e56290ac0609fb935 100644 --- a/substrate/frame/treasury/src/weights.rs +++ b/substrate/frame/treasury/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/try-runtime/src/lib.rs b/substrate/frame/try-runtime/src/lib.rs index dcd3a478782378ce8d4d83068e1d400fbe2a40d5..b2dfdfac6429efc9680e84c49a000a6d0ea7b9fe 100644 --- a/substrate/frame/try-runtime/src/lib.rs +++ b/substrate/frame/try-runtime/src/lib.rs @@ -19,8 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; use frame_support::weights::Weight; +use sp_std::prelude::*; sp_api::decl_runtime_apis! { /// Runtime api for testing the execution of a runtime upgrade. 
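For reference, the treasury hunks above only reformat the code; the two pieces of arithmetic the tests rely on are unchanged. A simplified sketch of both, with plain integers in place of Permill and BalanceOf and invented names:

    // Bond reserved from a proposer: the configured fraction of the
    // requested value, but never less than the minimum bond. It is
    // slashed if the proposal is rejected.
    fn sketch_proposal_bond(value: u64, bond_parts_per_million: u64, minimum: u64) -> u64 {
        minimum.max(value * bond_parts_per_million / 1_000_000)
    }

    // Amount burned at the end of a spend period when no approved
    // proposal had to be skipped for lack of funds.
    fn sketch_burn(budget_remaining: u64, burn_parts_per_million: u64) -> u64 {
        budget_remaining * burn_parts_per_million / 1_000_000
    }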
diff --git a/substrate/frame/uniques/src/benchmarking.rs b/substrate/frame/uniques/src/benchmarking.rs index ca6d656bd5005741b694c39c3e2c1293c3dea7b3..20ddbb15d53609c3e1fb75910681a28887e21fff 100644 --- a/substrate/frame/uniques/src/benchmarking.rs +++ b/substrate/frame/uniques/src/benchmarking.rs @@ -19,22 +19,26 @@ #![cfg(feature = "runtime-benchmarks")] -use sp_std::{prelude::*, convert::TryInto}; use super::*; -use sp_runtime::traits::Bounded; -use frame_system::RawOrigin as SystemOrigin; use frame_benchmarking::{ - benchmarks_instance_pallet, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite + account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, + whitelisted_caller, +}; +use frame_support::{ + dispatch::UnfilteredDispatchable, + traits::{EnsureOrigin, Get}, + BoundedVec, }; -use frame_support::{traits::{Get, EnsureOrigin}, dispatch::UnfilteredDispatchable, BoundedVec}; +use frame_system::RawOrigin as SystemOrigin; +use sp_runtime::traits::Bounded; +use sp_std::{convert::TryInto, prelude::*}; use crate::Pallet as Uniques; const SEED: u32 = 0; -fn create_class, I: 'static>() - -> (T::ClassId, T::AccountId, ::Source) -{ +fn create_class, I: 'static>( +) -> (T::ClassId, T::AccountId, ::Source) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let class = Default::default(); @@ -43,13 +47,13 @@ fn create_class, I: 'static>() SystemOrigin::Signed(caller.clone()).into(), class, caller_lookup.clone(), - ).is_ok()); + ) + .is_ok()); (class, caller, caller_lookup) } -fn add_class_metadata, I: 'static>() - -> (T::AccountId, ::Source) -{ +fn add_class_metadata, I: 'static>( +) -> (T::AccountId, ::Source) { let caller = Class::::get(T::ClassId::default()).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -60,13 +64,14 @@ fn add_class_metadata, I: 'static>() Default::default(), vec![0; T::StringLimit::get() as usize].try_into().unwrap(), false, - ).is_ok()); + ) + .is_ok()); (caller, caller_lookup) } -fn mint_instance, I: 'static>(index: u16) - -> (T::InstanceId, T::AccountId, ::Source) -{ +fn mint_instance, I: 'static>( + index: u16, +) -> (T::InstanceId, T::AccountId, ::Source) { let caller = Class::::get(T::ClassId::default()).unwrap().admin; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -78,13 +83,14 @@ fn mint_instance, I: 'static>(index: u16) Default::default(), instance, caller_lookup.clone(), - ).is_ok()); + ) + .is_ok()); (instance, caller, caller_lookup) } -fn add_instance_metadata, I: 'static>(instance: T::InstanceId) - -> (T::AccountId, ::Source) -{ +fn add_instance_metadata, I: 'static>( + instance: T::InstanceId, +) -> (T::AccountId, ::Source) { let caller = Class::::get(T::ClassId::default()).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -96,13 +102,14 @@ fn add_instance_metadata, I: 'static>(instance: T::InstanceId) instance, vec![0; T::StringLimit::get() as usize].try_into().unwrap(), false, - ).is_ok()); + ) + .is_ok()); (caller, caller_lookup) } -fn add_instance_attribute, I: 'static>(instance: T::InstanceId) - -> (BoundedVec, T::AccountId, ::Source) -{ +fn add_instance_attribute, I: 'static>( + instance: T::InstanceId, +) -> (BoundedVec, T::AccountId, ::Source) { let caller = Class::::get(T::ClassId::default()).unwrap().owner; if caller != whitelisted_caller() { whitelist_account!(caller); @@ -115,7 +122,8 @@ fn add_instance_attribute, I: 'static>(instance: T::InstanceId) 
Some(instance), key.clone(), vec![0; T::ValueLimit::get() as usize].try_into().unwrap(), - ).is_ok()); + ) + .is_ok()); (key, caller, caller_lookup) } diff --git a/substrate/frame/uniques/src/functions.rs b/substrate/frame/uniques/src/functions.rs index 28ff5ac6a703306964cf1e46ad836cd69c4ab58c..5d1e75735752bad0e51864b4de5db47c16e1142a 100644 --- a/substrate/frame/uniques/src/functions.rs +++ b/substrate/frame/uniques/src/functions.rs @@ -19,7 +19,7 @@ use super::*; use frame_support::{ensure, traits::Get}; -use sp_runtime::{DispatchResult, DispatchError}; +use sp_runtime::{DispatchError, DispatchResult}; impl, I: 'static> Pallet { pub(crate) fn do_transfer( @@ -52,9 +52,7 @@ impl, I: 'static> Pallet { class: T::ClassId, instance: T::InstanceId, owner: T::AccountId, - with_details: impl FnOnce( - &ClassDetailsFor, - ) -> DispatchResult, + with_details: impl FnOnce(&ClassDetailsFor) -> DispatchResult, ) -> DispatchResult { ensure!(!Asset::::contains_key(class, instance), Error::::AlreadyExists); @@ -63,8 +61,8 @@ impl, I: 'static> Pallet { with_details(&class_details)?; - let instances = class_details.instances.checked_add(1) - .ok_or(ArithmeticError::Overflow)?; + let instances = + class_details.instances.checked_add(1).ok_or(ArithmeticError::Overflow)?; class_details.instances = instances; let deposit = match class_details.free_holding { @@ -76,7 +74,7 @@ impl, I: 'static> Pallet { let owner = owner.clone(); Account::::insert((&owner, &class, &instance), ()); - let details = InstanceDetails { owner, approved: None, is_frozen: false, deposit}; + let details = InstanceDetails { owner, approved: None, is_frozen: false, deposit }; Asset::::insert(&class, &instance, details); Ok(()) })?; @@ -88,23 +86,23 @@ impl, I: 'static> Pallet { pub(super) fn do_burn( class: T::ClassId, instance: T::InstanceId, - with_details: impl FnOnce( - &ClassDetailsFor, - &InstanceDetailsFor, - ) -> DispatchResult, + with_details: impl FnOnce(&ClassDetailsFor, &InstanceDetailsFor) -> DispatchResult, ) -> DispatchResult { - let owner = Class::::try_mutate(&class, |maybe_class_details| -> Result { - let class_details = maybe_class_details.as_mut().ok_or(Error::::Unknown)?; - let details = Asset::::get(&class, &instance) - .ok_or(Error::::Unknown)?; - with_details(&class_details, &details)?; - - // Return the deposit. - T::Currency::unreserve(&class_details.owner, details.deposit); - class_details.total_deposit.saturating_reduce(details.deposit); - class_details.instances.saturating_dec(); - Ok(details.owner) - })?; + let owner = Class::::try_mutate( + &class, + |maybe_class_details| -> Result { + let class_details = maybe_class_details.as_mut().ok_or(Error::::Unknown)?; + let details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; + with_details(&class_details, &details)?; + + // Return the deposit. + T::Currency::unreserve(&class_details.owner, details.deposit); + class_details.total_deposit.saturating_reduce(details.deposit); + class_details.instances.saturating_dec(); + Ok(details.owner) + }, + )?; Asset::::remove(&class, &instance); Account::::remove((&owner, &class, &instance)); diff --git a/substrate/frame/uniques/src/impl_nonfungibles.rs b/substrate/frame/uniques/src/impl_nonfungibles.rs index 7113f314697a1475a514030c66e6c8ea4c5f9750..fb1e28d4c77bf9515438ada295534e2cfc8897a0 100644 --- a/substrate/frame/uniques/src/impl_nonfungibles.rs +++ b/substrate/frame/uniques/src/impl_nonfungibles.rs @@ -18,10 +18,12 @@ //! Implementations for `nonfungibles` traits. 
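The do_mint/do_burn refactors above keep the pallet's deposit bookkeeping intact. A compressed sketch of what that bookkeeping does (struct and names invented for illustration; the pallet tracks this inside ClassDetails):

    // Minting reserves the instance deposit from the class owner unless
    // the class is free-holding; burning reverses both counters and the
    // caller unreserves the deposit.
    struct ClassSketch {
        free_holding: bool,
        total_deposit: u64,
        instances: u32,
    }

    impl ClassSketch {
        fn mint(&mut self, instance_deposit: u64) -> u64 {
            let deposit = if self.free_holding { 0 } else { instance_deposit };
            self.total_deposit += deposit;
            self.instances += 1;
            deposit // amount to reserve from the class owner
        }

        fn burn(&mut self, deposit: u64) {
            // The sketch assumes deposit was recorded by a matching mint.
            self.total_deposit -= deposit;
            self.instances -= 1;
        }
    }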
use super::*; -use sp_std::convert::TryFrom; -use frame_support::traits::tokens::nonfungibles::{Inspect, InspectEnumerable, Mutate, Transfer}; -use frame_support::BoundedSlice; +use frame_support::{ + traits::tokens::nonfungibles::{Inspect, InspectEnumerable, Mutate, Transfer}, + BoundedSlice, +}; use sp_runtime::DispatchResult; +use sp_std::convert::TryFrom; impl, I: 'static> Inspect<::AccountId> for Pallet { type InstanceId = T::InstanceId; @@ -43,9 +45,11 @@ impl, I: 'static> Inspect<::AccountId> for Palle /// When `key` is empty, we return the instance metadata value. /// /// By default this is `None`; no attributes are defined. - fn attribute(class: &Self::ClassId, instance: &Self::InstanceId, key: &[u8]) - -> Option> - { + fn attribute( + class: &Self::ClassId, + instance: &Self::InstanceId, + key: &[u8], + ) -> Option> { if key.is_empty() { // We make the empty key map to the instance metadata value. InstanceMetadataOf::::get(class, instance).map(|m| m.data.into()) @@ -60,9 +64,7 @@ impl, I: 'static> Inspect<::AccountId> for Palle /// When `key` is empty, we return the instance metadata value. /// /// By default this is `None`; no attributes are defined. - fn class_attribute(class: &Self::ClassId, key: &[u8]) - -> Option> - { + fn class_attribute(class: &Self::ClassId, key: &[u8]) -> Option> { if key.is_empty() { // We make the empty key map to the instance metadata value. ClassMetadataOf::::get(class).map(|m| m.data.into()) @@ -132,7 +134,10 @@ impl, I: 'static> InspectEnumerable for Pallet /// Returns an iterator of the asset instances of `class` owned by `who`. /// /// NOTE: iterating this list invokes a storage read per item. - fn owned_in_class(class: &Self::ClassId, who: &T::AccountId) -> Box> { + fn owned_in_class( + class: &Self::ClassId, + who: &T::AccountId, + ) -> Box> { Box::new(Account::::iter_key_prefix((who, class))) } } diff --git a/substrate/frame/uniques/src/lib.rs b/substrate/frame/uniques/src/lib.rs index 2275be6419ca84128d6f901cc6dd9f7eeccdffd7..d42b2ec55c964f6c77ff3575da89cb86eb3c5b23 100644 --- a/substrate/frame/uniques/src/lib.rs +++ b/substrate/frame/uniques/src/lib.rs @@ -27,33 +27,36 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -pub mod weights; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; #[cfg(test)] pub mod mock; #[cfg(test)] mod tests; +pub mod weights; -mod types; mod functions; mod impl_nonfungibles; +mod types; pub use types::*; -use sp_std::prelude::*; -use sp_runtime::{RuntimeDebug, ArithmeticError, traits::{Zero, StaticLookup, Saturating}}; -use codec::{Encode, Decode, HasCompact}; -use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved}; +use codec::{Decode, Encode, HasCompact}; +use frame_support::traits::{BalanceStatus::Reserved, Currency, ReservableCurrency}; use frame_system::Config as SystemConfig; +use sp_runtime::{ + traits::{Saturating, StaticLookup, Zero}, + ArithmeticError, RuntimeDebug, +}; +use sp_std::prelude::*; -pub use weights::WeightInfo; pub use pallet::*; +pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -182,7 +185,7 @@ pub mod pallet { NMapKey>, ), (BoundedVec, DepositBalanceOf), - OptionQuery + OptionQuery, >; #[pallet::event] @@ -190,7 +193,7 @@ pub mod pallet { #[pallet::metadata( T::AccountId = "AccountId", T::ClassId = "ClassId", - T::InstanceId = "InstanceId", + T::InstanceId = "InstanceId" )] pub enum Event, I: 'static = ()> { /// An asset class was created. \[ class, creator, owner \] @@ -419,7 +422,10 @@ pub mod pallet { ensure!(class_details.owner == check_owner, Error::::NoPermission); } ensure!(class_details.instances == witness.instances, Error::::BadWitness); - ensure!(class_details.instance_metadatas == witness.instance_metadatas, Error::::BadWitness); + ensure!( + class_details.instance_metadatas == witness.instance_metadatas, + Error::::BadWitness + ); ensure!(class_details.attributes == witness.attributes, Error::::BadWitness); for (instance, details) in Asset::::drain_prefix(&class) { @@ -490,7 +496,10 @@ pub mod pallet { Self::do_burn(class, instance, |class_details, details| { let is_permitted = class_details.admin == origin || details.owner == origin; ensure!(is_permitted, Error::::NoPermission); - ensure!(check_owner.map_or(true, |o| o == details.owner), Error::::WrongOwner); + ensure!( + check_owner.map_or(true, |o| o == details.owner), + Error::::WrongOwner + ); Ok(()) }) } @@ -610,8 +619,8 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let mut details = Asset::::get(&class, &instance) - .ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; ensure!(class_details.freezer == origin, Error::::NoPermission); @@ -640,8 +649,8 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let mut details = Asset::::get(&class, &instance) - .ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; ensure!(class_details.admin == origin, Error::::NoPermission); @@ -664,7 +673,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::freeze_class())] pub fn freeze_class( origin: OriginFor, - #[pallet::compact] class: T::ClassId + #[pallet::compact] class: T::ClassId, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -691,7 +700,7 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::thaw_class())] pub 
fn thaw_class( origin: OriginFor, - #[pallet::compact] class: T::ClassId + #[pallet::compact] class: T::ClassId, ) -> DispatchResult { let origin = ensure_signed(origin)?; @@ -729,7 +738,7 @@ pub mod pallet { let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; ensure!(&origin == &details.owner, Error::::NoPermission); if details.owner == owner { - return Ok(()); + return Ok(()) } // Move the deposit to the new owner. @@ -809,8 +818,8 @@ pub mod pallet { let delegate = T::Lookup::lookup(delegate)?; let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; - let mut details = Asset::::get(&class, &instance) - .ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; if let Some(check) = maybe_check { let permitted = &check == &class_details.admin || &check == &details.owner; @@ -854,8 +863,8 @@ pub mod pallet { .or_else(|origin| ensure_signed(origin).map(Some).map_err(DispatchError::from))?; let class_details = Class::::get(&class).ok_or(Error::::Unknown)?; - let mut details = Asset::::get(&class, &instance) - .ok_or(Error::::Unknown)?; + let mut details = + Asset::::get(&class, &instance).ok_or(Error::::Unknown)?; if let Some(check) = maybe_check { let permitted = &check == &class_details.admin || &check == &details.owner; ensure!(permitted, Error::::NoPermission); @@ -1060,8 +1069,7 @@ pub mod pallet { .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some))?; - let mut class_details = Class::::get(&class) - .ok_or(Error::::Unknown)?; + let mut class_details = Class::::get(&class).ok_or(Error::::Unknown)?; if let Some(check_owner) = &maybe_check_owner { ensure!(check_owner == &class_details.owner, Error::::NoPermission); @@ -1089,11 +1097,7 @@ pub mod pallet { } class_details.total_deposit.saturating_accrue(deposit); - *metadata = Some(InstanceMetadata { - deposit, - data: data.clone(), - is_frozen, - }); + *metadata = Some(InstanceMetadata { deposit, data: data.clone(), is_frozen }); Class::::insert(&class, &class_details); Self::deposit_event(Event::MetadataSet(class, instance, data, is_frozen)); @@ -1124,8 +1128,7 @@ pub mod pallet { .map(|_| None) .or_else(|origin| ensure_signed(origin).map(Some))?; - let mut class_details = Class::::get(&class) - .ok_or(Error::::Unknown)?; + let mut class_details = Class::::get(&class).ok_or(Error::::Unknown)?; if let Some(check_owner) = &maybe_check_owner { ensure!(check_owner == &class_details.owner, Error::::NoPermission); } @@ -1200,11 +1203,7 @@ pub mod pallet { Class::::insert(&class, details); - *metadata = Some(ClassMetadata { - deposit, - data: data.clone(), - is_frozen, - }); + *metadata = Some(ClassMetadata { deposit, data: data.clone(), is_frozen }); Self::deposit_event(Event::ClassMetadataSet(class, data, is_frozen)); Ok(()) diff --git a/substrate/frame/uniques/src/mock.rs b/substrate/frame/uniques/src/mock.rs index 254acd6c419cf98c972ea2c5be024e1f38393121..4b80aa73030cfe840ccc79c469a8dea9ea7aa814 100644 --- a/substrate/frame/uniques/src/mock.rs +++ b/substrate/frame/uniques/src/mock.rs @@ -20,9 +20,12 @@ use super::*; use crate as pallet_uniques; +use frame_support::{construct_runtime, parameter_types}; use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use frame_support::{parameter_types, construct_runtime}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; diff 
--git a/substrate/frame/uniques/src/tests.rs b/substrate/frame/uniques/src/tests.rs index 4673ff71f8ed9fe87b8ffe6ee58595b7b1031af0..8a4f978b7f4f511bd8b08d6dcaaf12397aae302c 100644 --- a/substrate/frame/uniques/src/tests.rs +++ b/substrate/frame/uniques/src/tests.rs @@ -19,9 +19,9 @@ use super::*; use crate::mock::*; -use sp_std::convert::TryInto; -use frame_support::{assert_ok, assert_noop, traits::Currency}; +use frame_support::{assert_noop, assert_ok, traits::Currency}; use pallet_balances::Error as BalancesError; +use sp_std::convert::TryInto; fn assets() -> Vec<(u64, u32, u32)> { let mut r: Vec<_> = Account::::iter().map(|x| x.0).collect(); @@ -31,13 +31,15 @@ fn assets() -> Vec<(u64, u32, u32)> { assert_eq!(r, s); for class in Asset::::iter() .map(|x| x.0) - .scan(None, |s, item| if s.map_or(false, |last| last == item) { + .scan(None, |s, item| { + if s.map_or(false, |last| last == item) { *s = Some(item); Some(None) } else { Some(Some(item)) } - ).filter_map(|item| item) + }) + .filter_map(|item| item) { let details = Class::::get(class).unwrap(); let instances = Asset::::iter_prefix(class).count() as u32; @@ -181,7 +183,10 @@ fn origin_guards_should_work() { new_test_ext().execute_with(|| { assert_ok!(Uniques::force_create(Origin::root(), 0, 1, true)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 1)); - assert_noop!(Uniques::transfer_ownership(Origin::signed(2), 0, 2), Error::::NoPermission); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(2), 0, 2), + Error::::NoPermission + ); assert_noop!(Uniques::set_team(Origin::signed(2), 0, 2, 2, 2), Error::::NoPermission); assert_noop!(Uniques::freeze(Origin::signed(2), 0, 42), Error::::NoPermission); assert_noop!(Uniques::thaw(Origin::signed(2), 0, 42), Error::::NoPermission); @@ -205,7 +210,10 @@ fn transfer_owner_should_work() { assert_eq!(Balances::reserved_balance(&1), 0); assert_eq!(Balances::reserved_balance(&2), 2); - assert_noop!(Uniques::transfer_ownership(Origin::signed(1), 0, 1), Error::::NoPermission); + assert_noop!( + Uniques::transfer_ownership(Origin::signed(1), 0, 1), + Error::::NoPermission + ); // Mint and set metadata now and make sure that deposit gets transferred back. 
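// The reserved-balance figures asserted throughout these tests follow one
// rule. A sketch with assumed mock constants (deposit base 1, one unit per
// byte -- those constants are not shown in this diff):
fn sketch_attribute_deposit(base: u64, per_byte: u64, key_len: u64, value_len: u64) -> u64 {
    base + (key_len + value_len) * per_byte
}
// Three attributes with 1-byte keys and values give 3 * (1 + 2) = 9,
// matching the first reserved_balance assertion in set_attribute_should_work;
// growing one value to 10 bytes raises its deposit to 1 + 11 = 12, for 18.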
assert_ok!(Uniques::set_class_metadata(Origin::signed(2), 0, bvec![0u8; 20], false)); @@ -279,7 +287,10 @@ fn set_class_metadata_should_work() { // Clear Metadata assert_ok!(Uniques::set_class_metadata(Origin::root(), 0, bvec![0u8; 15], false)); - assert_noop!(Uniques::clear_class_metadata(Origin::signed(2), 0), Error::::NoPermission); + assert_noop!( + Uniques::clear_class_metadata(Origin::signed(2), 0), + Error::::NoPermission + ); assert_noop!(Uniques::clear_class_metadata(Origin::signed(1), 1), Error::::Unknown); assert_ok!(Uniques::clear_class_metadata(Origin::signed(1), 0)); assert!(!ClassMetadataOf::::contains_key(0)); @@ -330,7 +341,10 @@ fn set_instance_metadata_should_work() { // Clear Metadata assert_ok!(Uniques::set_metadata(Origin::root(), 0, 42, bvec![0u8; 15], false)); - assert_noop!(Uniques::clear_metadata(Origin::signed(2), 0, 42), Error::::NoPermission); + assert_noop!( + Uniques::clear_metadata(Origin::signed(2), 0, 42), + Error::::NoPermission + ); assert_noop!(Uniques::clear_metadata(Origin::signed(1), 1, 42), Error::::Unknown); assert_ok!(Uniques::clear_metadata(Origin::signed(1), 0, 42)); assert!(!InstanceMetadataOf::::contains_key(0, 42)); @@ -347,26 +361,32 @@ fn set_attribute_should_work() { assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![1], bvec![0])); - assert_eq!(attributes(0), vec![ - (None, bvec![0], bvec![0]), - (Some(0), bvec![0], bvec![0]), - (Some(0), bvec![1], bvec![0]), - ]); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], bvec![0]), + ] + ); assert_eq!(Balances::reserved_balance(1), 9); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0; 10])); - assert_eq!(attributes(0), vec![ - (None, bvec![0], bvec![0; 10]), - (Some(0), bvec![0], bvec![0]), - (Some(0), bvec![1], bvec![0]), - ]); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0; 10]), + (Some(0), bvec![0], bvec![0]), + (Some(0), bvec![1], bvec![0]), + ] + ); assert_eq!(Balances::reserved_balance(1), 18); assert_ok!(Uniques::clear_attribute(Origin::signed(1), 0, Some(0), bvec![1])); - assert_eq!(attributes(0), vec![ - (None, bvec![0], bvec![0; 10]), - (Some(0), bvec![0], bvec![0]), - ]); + assert_eq!( + attributes(0), + vec![(None, bvec![0], bvec![0; 10]), (Some(0), bvec![0], bvec![0]),] + ); assert_eq!(Balances::reserved_balance(1), 15); let w = Class::::get(0).unwrap().destroy_witness(); @@ -386,11 +406,14 @@ fn set_attribute_should_respect_freeze() { assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, None, bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(0), bvec![0], bvec![0])); assert_ok!(Uniques::set_attribute(Origin::signed(1), 0, Some(1), bvec![0], bvec![0])); - assert_eq!(attributes(0), vec![ - (None, bvec![0], bvec![0]), - (Some(0), bvec![0], bvec![0]), - (Some(1), bvec![0], bvec![0]), - ]); + assert_eq!( + attributes(0), + vec![ + (None, bvec![0], bvec![0]), + (Some(0), bvec![0], bvec![0]), + (Some(1), bvec![0], bvec![0]), + ] + ); assert_eq!(Balances::reserved_balance(1), 9); assert_ok!(Uniques::set_class_metadata(Origin::signed(1), 0, bvec![], true)); @@ -406,7 +429,7 @@ fn set_attribute_should_respect_freeze() { } #[test] -fn force_asset_status_should_work(){ +fn force_asset_status_should_work() { new_test_ext().execute_with(|| { 
Balances::make_free_balance_be(&1, 100); @@ -418,7 +441,7 @@ fn force_asset_status_should_work(){ assert_ok!(Uniques::set_metadata(Origin::signed(1), 0, 69, bvec![0; 20], false)); assert_eq!(Balances::reserved_balance(1), 65); - //force asset status to be free holding + // force asset status to be free holding assert_ok!(Uniques::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, true, false)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 142, 1)); assert_ok!(Uniques::mint(Origin::signed(1), 0, 169, 2)); @@ -484,13 +507,28 @@ fn cancel_approval_works() { assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); - assert_noop!(Uniques::cancel_approval(Origin::signed(2), 1, 42, None), Error::::Unknown); - assert_noop!(Uniques::cancel_approval(Origin::signed(2), 0, 43, None), Error::::Unknown); - assert_noop!(Uniques::cancel_approval(Origin::signed(3), 0, 42, None), Error::::NoPermission); - assert_noop!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), Error::::WrongDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 1, 42, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 43, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(3), 0, 42, None), + Error::::NoPermission + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(4)), + Error::::WrongDelegate + ); assert_ok!(Uniques::cancel_approval(Origin::signed(2), 0, 42, Some(3))); - assert_noop!(Uniques::cancel_approval(Origin::signed(2), 0, 42, None), Error::::NoDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::signed(2), 0, 42, None), + Error::::NoDelegate + ); }); } @@ -501,12 +539,24 @@ fn cancel_approval_works_with_admin() { assert_ok!(Uniques::mint(Origin::signed(1), 0, 42, 2)); assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); - assert_noop!(Uniques::cancel_approval(Origin::signed(1), 1, 42, None), Error::::Unknown); - assert_noop!(Uniques::cancel_approval(Origin::signed(1), 0, 43, None), Error::::Unknown); - assert_noop!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), Error::::WrongDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 1, 42, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 43, None), + Error::::Unknown + ); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(4)), + Error::::WrongDelegate + ); assert_ok!(Uniques::cancel_approval(Origin::signed(1), 0, 42, Some(3))); - assert_noop!(Uniques::cancel_approval(Origin::signed(1), 0, 42, None), Error::::NoDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::signed(1), 0, 42, None), + Error::::NoDelegate + ); }); } @@ -519,9 +569,15 @@ fn cancel_approval_works_with_force() { assert_ok!(Uniques::approve_transfer(Origin::signed(2), 0, 42, 3)); assert_noop!(Uniques::cancel_approval(Origin::root(), 1, 42, None), Error::::Unknown); assert_noop!(Uniques::cancel_approval(Origin::root(), 0, 43, None), Error::::Unknown); - assert_noop!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), Error::::WrongDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, Some(4)), + Error::::WrongDelegate + ); assert_ok!(Uniques::cancel_approval(Origin::root(), 0, 42, Some(3))); - assert_noop!(Uniques::cancel_approval(Origin::root(), 0, 42, None), Error::::NoDelegate); + assert_noop!( + Uniques::cancel_approval(Origin::root(), 0, 42, 
None), + Error::::NoDelegate + ); }); } diff --git a/substrate/frame/uniques/src/types.rs b/substrate/frame/uniques/src/types.rs index f73a18c7f3f3d25f9d22cf1878986b42fe85813b..ae61b6b5e1fd3d628de875f0cd6ce88b0babc4a7 100644 --- a/substrate/frame/uniques/src/types.rs +++ b/substrate/frame/uniques/src/types.rs @@ -27,12 +27,8 @@ pub(super) type ClassDetailsFor = pub(super) type InstanceDetailsFor = InstanceDetails<::AccountId, DepositBalanceOf>; - #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct ClassDetails< - AccountId, - DepositBalance, -> { +pub struct ClassDetails { /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. pub(super) owner: AccountId, /// Can mint tokens. diff --git a/substrate/frame/uniques/src/weights.rs b/substrate/frame/uniques/src/weights.rs index a2263d6cd3486b459b194f52d4cc7d461236b39b..0bef1cb5d693321437713a94683dd20fea2ce88f 100644 --- a/substrate/frame/uniques/src/weights.rs +++ b/substrate/frame/uniques/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/utility/src/benchmarking.rs b/substrate/frame/utility/src/benchmarking.rs index 44019e48c1eb42f3073500931f63d8b21589aa27..ae4eb68661ea788b9b666eaf87726dbcf9ba6c23 100644 --- a/substrate/frame/utility/src/benchmarking.rs +++ b/substrate/frame/utility/src/benchmarking.rs @@ -20,8 +20,8 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; const SEED: u32 = 0; @@ -65,8 +65,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - Pallet, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/utility/src/lib.rs b/substrate/frame/utility/src/lib.rs index b8170ac8ba002d8e9b733dead54582d2138ceb84..1133bd8698574261c55d44397cc7050e0c7ba647 100644 --- a/substrate/frame/utility/src/lib.rs +++ b/substrate/frame/utility/src/lib.rs @@ -52,36 +52,35 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -mod tests; mod benchmarking; +mod tests; pub mod weights; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_core::TypeId; -use sp_io::hashing::blake2_256; +use codec::{Decode, Encode}; use frame_support::{ - transactional, - traits::{OriginTrait, UnfilteredDispatchable, IsSubType}, - weights::{GetDispatchInfo, extract_actual_weight}, dispatch::PostDispatchInfo, + traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, + transactional, + weights::{extract_actual_weight, GetDispatchInfo}, }; +use sp_core::TypeId; +use sp_io::hashing::blake2_256; use sp_runtime::traits::Dispatchable; +use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet(_); - /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { @@ -89,9 +88,11 @@ pub mod pallet { type Event: From + IsType<::Event>; /// The overarching call type. 
- type Call: Parameter + Dispatchable - + GetDispatchInfo + From> - + UnfilteredDispatchable + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + UnfilteredDispatchable + IsSubType> + IsType<::Call>; @@ -170,7 +171,7 @@ pub mod pallet { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. - return Ok(Some(base_weight + weight).into()); + return Ok(Some(base_weight + weight).into()) } } Self::deposit_event(Event::BatchCompleted); @@ -213,13 +214,16 @@ pub mod pallet { let info = call.get_dispatch_info(); let result = call.dispatch(origin); // Always take into account the base weight of this call. - let mut weight = T::WeightInfo::as_derivative().saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let mut weight = T::WeightInfo::as_derivative() + .saturating_add(T::DbWeight::get().reads_writes(1, 1)); // Add the real weight of the dispatch. weight = weight.saturating_add(extract_actual_weight(&result, &info)); - result.map_err(|mut err| { - err.post_info = Some(weight).into(); - err - }).map(|_| Some(weight).into()) + result + .map_err(|mut err| { + err.post_info = Some(weight).into(); + err + }) + .map(|_| Some(weight).into()) } /// Send a batch of dispatch calls and atomically execute them. @@ -291,7 +295,6 @@ pub mod pallet { Ok(Some(base_weight + weight).into()) } } - } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. diff --git a/substrate/frame/utility/src/tests.rs b/substrate/frame/utility/src/tests.rs index aa6bea8a27d36d0c9f3123fcbe470077c3824d76..61890972d3a0361d5a4b0abe9761f3b6c63b25f0 100644 --- a/substrate/frame/utility/src/tests.rs +++ b/substrate/frame/utility/src/tests.rs @@ -21,23 +21,26 @@ use super::*; +use crate as utility; use frame_support::{ - assert_ok, assert_noop, parameter_types, assert_err_ignore_postinfo, decl_module, - weights::{Weight, Pays}, + assert_err_ignore_postinfo, assert_noop, assert_ok, decl_module, dispatch::{DispatchError, DispatchErrorWithPostInfo, Dispatchable}, + parameter_types, storage, traits::Filter, - storage, + weights::{Pays, Weight}, }; use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as utility; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; // example module to test behaviors. pub mod example { use super::*; - use frame_system::ensure_signed; use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; - pub trait Config: frame_system::Config { } + use frame_system::ensure_signed; + pub trait Config: frame_system::Config {} decl_module! 
{ pub struct Module for enum Call where origin: ::Origin { @@ -160,14 +163,15 @@ type ExampleCall = example::Call; type UtilityCall = crate::Call; use frame_system::Call as SystemCall; -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; +use pallet_balances::{Call as BalancesCall, Error as BalancesError}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -178,11 +182,14 @@ fn as_derivative_works() { new_test_ext().execute_with(|| { let sub_1_0 = Utility::derivative_account_id(1, 0); assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); - assert_err_ignore_postinfo!(Utility::as_derivative( - Origin::signed(1), - 1, - Box::new(Call::Balances(BalancesCall::transfer(6, 3))), - ), BalancesError::::InsufficientBalance); + assert_err_ignore_postinfo!( + Utility::as_derivative( + Origin::signed(1), + 1, + Box::new(Call::Balances(BalancesCall::transfer(6, 3))), + ), + BalancesError::::InsufficientBalance + ); assert_ok!(Utility::as_derivative( Origin::signed(1), 0, @@ -256,11 +263,14 @@ fn as_derivative_handles_weight_refund() { #[test] fn as_derivative_filters() { new_test_ext().execute_with(|| { - assert_err_ignore_postinfo!(Utility::as_derivative( - Origin::signed(1), - 1, - Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1))), - ), DispatchError::BadOrigin); + assert_err_ignore_postinfo!( + Utility::as_derivative( + Origin::signed(1), + 1, + Box::new(Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1))), + ), + DispatchError::BadOrigin + ); }); } @@ -272,11 +282,14 @@ fn batch_with_root_works() { assert!(!TestBaseCallFilter::filter(&call)); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!(Utility::batch(Origin::root(), vec![ - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - call, // Check filters are correctly bypassed - ])); + assert_ok!(Utility::batch( + Origin::root(), + vec![ + Call::Balances(BalancesCall::force_transfer(1, 2, 5)), + Call::Balances(BalancesCall::force_transfer(1, 2, 5)), + call, // Check filters are correctly bypassed + ] + )); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::free_balance(2), 20); assert_eq!(storage::unhashed::get_raw(&k), Some(k)); @@ -288,12 +301,13 @@ fn batch_with_signed_works() { new_test_ext().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ + assert_ok!(Utility::batch( + Origin::signed(1), + vec![ Call::Balances(BalancesCall::transfer(2, 5)), Call::Balances(BalancesCall::transfer(2, 5)) - ]), - ); + ] + ),); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::free_balance(2), 20); }); @@ -302,12 +316,13 @@ fn batch_with_signed_works() { #[test] fn batch_with_signed_filters() { new_test_ext().execute_with(|| { - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1)) - ]), + assert_ok!(Utility::batch( + Origin::signed(1), + vec![Call::Balances(pallet_balances::Call::transfer_keep_alive(2, 1))] + 
),); + System::assert_last_event( + utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), ); - System::assert_last_event(utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into()); }); } @@ -316,13 +331,14 @@ fn batch_early_exit_works() { new_test_ext().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ + assert_ok!(Utility::batch( + Origin::signed(1), + vec![ Call::Balances(BalancesCall::transfer(2, 5)), Call::Balances(BalancesCall::transfer(2, 10)), Call::Balances(BalancesCall::transfer(2, 5)), - ]), - ); + ] + ),); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::free_balance(2), 15); }); @@ -381,7 +397,9 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - System::assert_last_event(utility::Event::BatchInterrupted(1, DispatchError::Other("")).into()); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); // No weight is refunded assert_eq!(extract_actual_weight(&result, &info), info.weight); @@ -394,7 +412,9 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - System::assert_last_event(utility::Event::BatchInterrupted(1, DispatchError::Other("")).into()); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); assert_eq!(extract_actual_weight(&result, &info), info.weight - diff * batch_len); // Partial batch completion @@ -405,7 +425,9 @@ fn batch_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(1)); assert_ok!(result); - System::assert_last_event(utility::Event::BatchInterrupted(1, DispatchError::Other("")).into()); + System::assert_last_event( + utility::Event::BatchInterrupted(1, DispatchError::Other("")).into(), + ); assert_eq!( extract_actual_weight(&result, &info), // Real weight is 2 calls at end_weight @@ -419,12 +441,13 @@ fn batch_all_works() { new_test_ext().execute_with(|| { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch_all(Origin::signed(1), vec![ + assert_ok!(Utility::batch_all( + Origin::signed(1), + vec![ Call::Balances(BalancesCall::transfer(2, 5)), Call::Balances(BalancesCall::transfer(2, 5)) - ]), - ); + ] + ),); assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::free_balance(2), 20); }); @@ -439,14 +462,19 @@ fn batch_all_revert() { assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); assert_noop!( - Utility::batch_all(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 10)), - Call::Balances(BalancesCall::transfer(2, 5)), - ]), + Utility::batch_all( + Origin::signed(1), + vec![ + Call::Balances(BalancesCall::transfer(2, 5)), + Call::Balances(BalancesCall::transfer(2, 10)), + Call::Balances(BalancesCall::transfer(2, 5)), + ] + ), DispatchErrorWithPostInfo { post_info: PostDispatchInfo { - actual_weight: Some(::WeightInfo::batch_all(2) + info.weight * 2), + actual_weight: Some( + ::WeightInfo::batch_all(2) + info.weight * 2 + ), pays_fee: Pays::Yes }, error: pallet_balances::Error::::InsufficientBalance.into() @@ -525,15 +553,11 @@ fn batch_all_handles_weight_refund() { #[test] fn batch_all_does_not_nest() { 
new_test_ext().execute_with(|| { - let batch_all = Call::Utility( - UtilityCall::batch_all( - vec![ - Call::Balances(BalancesCall::transfer(2, 1)), - Call::Balances(BalancesCall::transfer(2, 1)), - Call::Balances(BalancesCall::transfer(2, 1)), - ] - ) - ); + let batch_all = Call::Utility(UtilityCall::batch_all(vec![ + Call::Balances(BalancesCall::transfer(2, 1)), + Call::Balances(BalancesCall::transfer(2, 1)), + Call::Balances(BalancesCall::transfer(2, 1)), + ])); let info = batch_all.get_dispatch_info(); @@ -557,7 +581,9 @@ fn batch_all_does_not_nest() { // Batch will end with `Ok`, but does not actually execute as we can see from the event // and balances. assert_ok!(Utility::batch_all(Origin::signed(1), vec![batch_nested])); - System::assert_has_event(utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into()); + System::assert_has_event( + utility::Event::BatchInterrupted(0, DispatchError::BadOrigin).into(), + ); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::free_balance(2), 10); }); diff --git a/substrate/frame/utility/src/weights.rs b/substrate/frame/utility/src/weights.rs index 0bab97201008cbd988bf9220b8a7ab4ecd22d002..e098bf2b8a9e3184f54ddb0b66ccec6f17ce8fb5 100644 --- a/substrate/frame/utility/src/weights.rs +++ b/substrate/frame/utility/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/vesting/src/benchmarking.rs b/substrate/frame/vesting/src/benchmarking.rs index 6fd27e18772290a512f1fcb0be6b2532a25647e3..fba4369dba9d3105829df67cd5001dfb5c6de6e0 100644 --- a/substrate/frame/vesting/src/benchmarking.rs +++ b/substrate/frame/vesting/src/benchmarking.rs @@ -21,15 +21,16 @@ use super::*; -use frame_system::{RawOrigin, Pallet as System}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_system::{Pallet as System, RawOrigin}; use sp_runtime::traits::Bounded; use crate::Pallet as Vesting; const SEED: u32 = 0; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; fn add_locks(who: &T::AccountId, n: u8) { for id in 0..n { diff --git a/substrate/frame/vesting/src/lib.rs b/substrate/frame/vesting/src/lib.rs index b53262840f443f132f9debe60cf4ebf08432776c..8a2651a84c647bce7899e39b5ee26da805879df9 100644 --- a/substrate/frame/vesting/src/lib.rs +++ b/substrate/frame/vesting/src/lib.rs @@ -88,14 +88,14 @@ pub struct VestingInfo { pub starting_block: BlockNumber, } -impl< - Balance: AtLeast32BitUnsigned + Copy, - BlockNumber: AtLeast32BitUnsigned + Copy, -> VestingInfo { +impl + VestingInfo +{ /// Amount locked at block `n`. - pub fn locked_at< - BlockNumberToBalance: Convert - >(&self, n: BlockNumber) -> Balance { + pub fn locked_at>( + &self, + n: BlockNumber, + ) -> Balance { // Number of blocks that count toward vesting // Saturating to 0 when n < starting_block let vested_block_count = n.saturating_sub(self.starting_block); @@ -136,12 +136,8 @@ pub mod pallet { /// Information regarding the vesting of a given account. 
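(Aside on the `VestingInfo::locked_at` hunk above: the reformat leaves the linear-unlock rule intact. Below is a minimal sketch of that rule, with plain `u64` standing in for the pallet's generic `Balance`/`BlockNumber` and the `Convert` bound elided; the saturating arithmetic is the part the vesting tests later in this diff exercise.)

// Sketch only: `u64` replaces the pallet's generic Balance/BlockNumber types.
struct VestingSketch {
	locked: u64,         // total amount locked at the start of the schedule
	per_block: u64,      // amount that unlocks each block
	starting_block: u64, // first block that counts toward vesting
}

impl VestingSketch {
	// Amount still locked at block `n`, mirroring `VestingInfo::locked_at`.
	fn locked_at(&self, n: u64) -> u64 {
		// Number of blocks that count toward vesting, saturating to 0
		// when `n < starting_block`, exactly as in the hunk above.
		let vested_block_count = n.saturating_sub(self.starting_block);
		// Linear unlock; saturating math avoids underflow once fully vested.
		self.locked.saturating_sub(vested_block_count.saturating_mul(self.per_block))
	}
}

fn main() {
	// The "256 * 5 locked, 64 per block, starting at block 10" schedule
	// used repeatedly in the vesting tests below.
	let s = VestingSketch { locked: 256 * 5, per_block: 64, starting_block: 10 };
	assert_eq!(s.locked_at(0), 1280); // nothing unlocks before the start
	assert_eq!(s.locked_at(20), 640); // 10 blocks * 64 per block unlocked
	assert_eq!(s.locked_at(30), 0);   // fully vested, no underflow
}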
#[pallet::storage] #[pallet::getter(fn vesting)] - pub type Vesting = StorageMap< - _, - Blake2_128Concat, - T::AccountId, - VestingInfo, T::BlockNumber>, - >; + pub type Vesting = + StorageMap<_, Blake2_128Concat, T::AccountId, VestingInfo, T::BlockNumber>>; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -155,9 +151,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - GenesisConfig { - vesting: Default::default(), - } + GenesisConfig { vesting: Default::default() } } } @@ -179,11 +173,7 @@ pub mod pallet { let length_as_balance = T::BlockNumberToBalance::convert(length); let per_block = locked / length_as_balance.max(sp_runtime::traits::One::one()); - Vesting::::insert(who, VestingInfo { - locked: locked, - per_block: per_block, - starting_block: begin - }); + Vesting::::insert(who, VestingInfo { locked, per_block, starting_block: begin }); let reasons = WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE; T::Currency::set_lock(VESTING_ID, who, locked, reasons); } @@ -254,7 +244,10 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::vest_other_locked(MaxLocksOf::::get()) .max(T::WeightInfo::vest_other_unlocked(MaxLocksOf::::get())) )] - pub fn vest_other(origin: OriginFor, target: ::Source) -> DispatchResult { + pub fn vest_other( + origin: OriginFor, + target: ::Source, + ) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(T::Lookup::lookup(target)?) } @@ -287,10 +280,20 @@ pub mod pallet { let who = T::Lookup::lookup(target)?; ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); - T::Currency::transfer(&transactor, &who, schedule.locked, ExistenceRequirement::AllowDeath)?; - - Self::add_vesting_schedule(&who, schedule.locked, schedule.per_block, schedule.starting_block) - .expect("user does not have an existing vesting schedule; q.e.d."); + T::Currency::transfer( + &transactor, + &who, + schedule.locked, + ExistenceRequirement::AllowDeath, + )?; + + Self::add_vesting_schedule( + &who, + schedule.locked, + schedule.per_block, + schedule.starting_block, + ) + .expect("user does not have an existing vesting schedule; q.e.d."); Ok(()) } @@ -326,10 +329,20 @@ pub mod pallet { let source = T::Lookup::lookup(source)?; ensure!(!Vesting::::contains_key(&target), Error::::ExistingVestingSchedule); - T::Currency::transfer(&source, &target, schedule.locked, ExistenceRequirement::AllowDeath)?; - - Self::add_vesting_schedule(&target, schedule.locked, schedule.per_block, schedule.starting_block) - .expect("user does not have an existing vesting schedule; q.e.d."); + T::Currency::transfer( + &source, + &target, + schedule.locked, + ExistenceRequirement::AllowDeath, + )?; + + Self::add_vesting_schedule( + &target, + schedule.locked, + schedule.per_block, + schedule.starting_block, + ) + .expect("user does not have an existing vesting schedule; q.e.d."); Ok(()) } @@ -357,8 +370,9 @@ impl Pallet { } } -impl VestingSchedule for Pallet where - BalanceOf: MaybeSerializeDeserialize + Debug +impl VestingSchedule for Pallet +where + BalanceOf: MaybeSerializeDeserialize + Debug, { type Moment = T::BlockNumber; type Currency = T::Currency; @@ -388,17 +402,15 @@ impl VestingSchedule for Pallet where who: &T::AccountId, locked: BalanceOf, per_block: BalanceOf, - starting_block: T::BlockNumber + starting_block: T::BlockNumber, ) -> DispatchResult { - if locked.is_zero() { return Ok(()) } + if locked.is_zero() { + return Ok(()) + } if Vesting::::contains_key(who) { Err(Error::::ExistingVestingSchedule)? 
} - let vesting_schedule = VestingInfo { - locked, - per_block, - starting_block - }; + let vesting_schedule = VestingInfo { locked, per_block, starting_block }; Vesting::::insert(who, vesting_schedule); // it can't fail, but even if somehow it did, we don't really care. let res = Self::update_lock(who.clone()); diff --git a/substrate/frame/vesting/src/tests.rs b/substrate/frame/vesting/src/tests.rs index 7c59a61081d3b40d487f67ee99d974f4790b5947..2ee0e83933cb621e8d0760218172089a06d3af3b 100644 --- a/substrate/frame/vesting/src/tests.rs +++ b/substrate/frame/vesting/src/tests.rs @@ -24,336 +24,312 @@ use crate::mock::{Balances, ExtBuilder, System, Test, Vesting}; #[test] fn check_vesting_status() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - let user2_free_balance = Balances::free_balance(&2); - let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance - let user1_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 128, // Vesting over 10 blocks - starting_block: 0, - }; - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule - - // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 - assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); - // Account 2 has their full balance locked - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(10); - assert_eq!(System::block_number(), 10); - - // Account 1 has fully vested by block 10 - assert_eq!(Vesting::vesting_balance(&1), Some(0)); - // Account 2 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative - assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 - assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 - - }); + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + let user2_free_balance = Balances::free_balance(&2); + let user12_free_balance = Balances::free_balance(&12); + assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance + assert_eq!(user12_free_balance, 256 * 10); // Account 12 has 
free balance + let user1_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 128, // Vesting over 10 blocks + starting_block: 0, + }; + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule + + // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 + assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); + // Account 2 has their full balance locked + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has only their illiquid funds locked + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + System::set_block_number(10); + assert_eq!(System::block_number(), 10); + + // Account 1 has fully vested by block 10 + assert_eq!(Vesting::vesting_balance(&1), Some(0)); + // Account 2 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative + assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 + }); } #[test] fn unvested_balance_should_not_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 56), - pallet_balances::Error::::LiquidityRestrictions, - ); // Account 1 cannot send more than vested amount - }); + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_noop!( + Balances::transfer(Some(1).into(), 2, 56), + pallet_balances::Error::::LiquidityRestrictions, + ); // Account 1 cannot send more than vested amount + }); } #[test] fn vested_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + let 
user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); } #[test] fn vested_balance_should_transfer_using_vest_other() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); } #[test] fn extra_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); - - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal - - let user2_free_balance = Balances::free_balance(&2); - assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal - - // Account 1 has only 5 units vested at block 1 (plus 150 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained - - // Account 2 has no units vested at block 1, but gained 100 - assert_eq!(Vesting::vesting_balance(&2), Some(200)); - assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained - }); + ExtBuilder::default().existential_deposit(10).build().execute_with(|| { + assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); + assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal + + let user2_free_balance = Balances::free_balance(&2); + assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal + + // Account 1 has only 5 units vested at block 1 (plus 150 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + + // Account 2 has no units vested at block 1, but gained 100 + assert_eq!(Vesting::vesting_balance(&2), Some(200)); + assert_ok!(Vesting::vest(Some(2).into())); + assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained + }); } #[test] fn liquid_funds_should_transfer_with_delayed_vesting() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - 
let user12_free_balance = Balances::free_balance(&12); - - assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - // Account 12 has delayed vesting - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); - - // Account 12 can still send liquid funds - assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); - }); + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user12_free_balance = Balances::free_balance(&12); + + assert_eq!(user12_free_balance, 2560); // Account 12 has free balance + // Account 12 has liquid funds + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); + + // Account 12 has delayed vesting + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); + + // Account 12 can still send liquid funds + assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); + }); } #[test] fn vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. 
+		let user3_free_balance_updated = Balances::free_balance(&3);
+		assert_eq!(user3_free_balance_updated, 256 * 25);
+		let user4_free_balance_updated = Balances::free_balance(&4);
+		assert_eq!(user4_free_balance_updated, 256 * 45);
+		// Account 4 has 5 * 256 locked.
+		assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5));
+
+		System::set_block_number(20);
+		assert_eq!(System::block_number(), 20);
+
+		// Account 4 has 5 * 64 units vested by block 20.
+		assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64));
+
+		System::set_block_number(30);
+		assert_eq!(System::block_number(), 30);
+
+		// Account 4 has fully vested.
+		assert_eq!(Vesting::vesting_balance(&4), Some(0));
+	});
 }
 
 #[test]
 fn vested_transfer_correctly_fails() {
-	ExtBuilder::default()
-		.existential_deposit(256)
-		.build()
-		.execute_with(|| {
-			let user2_free_balance = Balances::free_balance(&2);
-			let user4_free_balance = Balances::free_balance(&4);
-			assert_eq!(user2_free_balance, 256 * 20);
-			assert_eq!(user4_free_balance, 256 * 40);
-			// Account 2 should already have a vesting schedule.
-			let user2_vesting_schedule = VestingInfo {
-				locked: 256 * 20,
-				per_block: 256, // Vesting over 20 blocks
-				starting_block: 10,
-			};
-			assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule));
-
-			// The vesting schedule we will try to create, fails due to pre-existence of schedule.
-			let new_vesting_schedule = VestingInfo {
-				locked: 256 * 5,
-				per_block: 64, // Vesting over 20 blocks
-				starting_block: 10,
-			};
-			assert_noop!(
-				Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule),
-				Error::<Test>::ExistingVestingSchedule,
-			);
-
-			// Fails due to too low transfer amount.
-			let new_vesting_schedule_too_low = VestingInfo {
-				locked: 256 * 1,
-				per_block: 64,
-				starting_block: 10,
-			};
-			assert_noop!(
-				Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low),
-				Error::<Test>::AmountLow,
-			);
-
-			// Verify no currency transfer happened.
-			assert_eq!(user2_free_balance, 256 * 20);
-			assert_eq!(user4_free_balance, 256 * 40);
-		});
+	ExtBuilder::default().existential_deposit(256).build().execute_with(|| {
+		let user2_free_balance = Balances::free_balance(&2);
+		let user4_free_balance = Balances::free_balance(&4);
+		assert_eq!(user2_free_balance, 256 * 20);
+		assert_eq!(user4_free_balance, 256 * 40);
+		// Account 2 should already have a vesting schedule.
+		let user2_vesting_schedule = VestingInfo {
+			locked: 256 * 20,
+			per_block: 256, // Vesting over 20 blocks
+			starting_block: 10,
+		};
+		assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule));
+
+		// The vesting schedule we will try to create, fails due to pre-existence of schedule.
+		let new_vesting_schedule = VestingInfo {
+			locked: 256 * 5,
+			per_block: 64, // Vesting over 20 blocks
+			starting_block: 10,
+		};
+		assert_noop!(
+			Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule),
+			Error::<Test>::ExistingVestingSchedule,
+		);
+
+		// Fails due to too low transfer amount.
+		let new_vesting_schedule_too_low =
+			VestingInfo { locked: 256 * 1, per_block: 64, starting_block: 10 };
+		assert_noop!(
+			Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low),
+			Error::<Test>::AmountLow,
+		);
+
+		// Verify no currency transfer happened.
+ assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); } #[test] fn force_vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!(Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), BadOrigin); - assert_ok!(Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule)); - // Now account 4 should have vesting. - assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!( + Vesting::force_vested_transfer(Some(4).into(), 3, 4, new_vesting_schedule), + BadOrigin + ); + assert_ok!(Vesting::force_vested_transfer( + RawOrigin::Root.into(), + 3, + 4, + new_vesting_schedule + )); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. + let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested. 
+		assert_eq!(Vesting::vesting_balance(&4), Some(0));
+	});
 }
 
 #[test]
 fn force_vested_transfer_correctly_fails() {
-	ExtBuilder::default()
-		.existential_deposit(256)
-		.build()
-		.execute_with(|| {
-			let user2_free_balance = Balances::free_balance(&2);
-			let user4_free_balance = Balances::free_balance(&4);
-			assert_eq!(user2_free_balance, 256 * 20);
-			assert_eq!(user4_free_balance, 256 * 40);
-			// Account 2 should already have a vesting schedule.
-			let user2_vesting_schedule = VestingInfo {
-				locked: 256 * 20,
-				per_block: 256, // Vesting over 20 blocks
-				starting_block: 10,
-			};
-			assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule));
-
-			// The vesting schedule we will try to create, fails due to pre-existence of schedule.
-			let new_vesting_schedule = VestingInfo {
-				locked: 256 * 5,
-				per_block: 64, // Vesting over 20 blocks
-				starting_block: 10,
-			};
-			assert_noop!(
-				Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule),
-				Error::<Test>::ExistingVestingSchedule,
-			);
-
-			// Fails due to too low transfer amount.
-			let new_vesting_schedule_too_low = VestingInfo {
-				locked: 256 * 1,
-				per_block: 64,
-				starting_block: 10,
-			};
-			assert_noop!(
-				Vesting::force_vested_transfer(RawOrigin::Root.into(), 3, 4, new_vesting_schedule_too_low),
-				Error::<Test>::AmountLow,
-			);
-
-			// Verify no currency transfer happened.
-			assert_eq!(user2_free_balance, 256 * 20);
-			assert_eq!(user4_free_balance, 256 * 40);
-		});
+	ExtBuilder::default().existential_deposit(256).build().execute_with(|| {
+		let user2_free_balance = Balances::free_balance(&2);
+		let user4_free_balance = Balances::free_balance(&4);
+		assert_eq!(user2_free_balance, 256 * 20);
+		assert_eq!(user4_free_balance, 256 * 40);
+		// Account 2 should already have a vesting schedule.
+		let user2_vesting_schedule = VestingInfo {
+			locked: 256 * 20,
+			per_block: 256, // Vesting over 20 blocks
+			starting_block: 10,
+		};
+		assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule));
+
+		// The vesting schedule we will try to create, fails due to pre-existence of schedule.
+		let new_vesting_schedule = VestingInfo {
+			locked: 256 * 5,
+			per_block: 64, // Vesting over 20 blocks
+			starting_block: 10,
+		};
+		assert_noop!(
+			Vesting::force_vested_transfer(RawOrigin::Root.into(), 4, 2, new_vesting_schedule),
+			Error::<Test>::ExistingVestingSchedule,
+		);
+
+		// Fails due to too low transfer amount.
+		let new_vesting_schedule_too_low =
+			VestingInfo { locked: 256 * 1, per_block: 64, starting_block: 10 };
+		assert_noop!(
+			Vesting::force_vested_transfer(
+				RawOrigin::Root.into(),
+				3,
+				4,
+				new_vesting_schedule_too_low
+			),
+			Error::<Test>::AmountLow,
+		);
+
+		// Verify no currency transfer happened.
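(Aside on the `assert_noop!` calls reformatted throughout these tests: the macro asserts two things at once, that the call fails with the given error and that the failing call left storage untouched. A self-contained sketch of that contract, with a caller-supplied `state_root` closure standing in for FRAME's storage root; not FRAME's actual macro.)

// Conceptual sketch of the `assert_noop!` contract.
fn assert_noop_sketch<E: std::fmt::Debug + PartialEq>(
	state_root: impl Fn() -> [u8; 32],
	call: impl FnOnce() -> Result<(), E>,
	expected_err: E,
) {
	let before = state_root();
	// The call must fail with exactly the expected error...
	assert_eq!(call(), Err(expected_err));
	// ...and must not have mutated any state while failing.
	assert_eq!(state_root(), before, "failing call must be a no-op");
}

fn main() {
	// Trivial usage: a call that fails without touching the (empty) state.
	let root = || [0u8; 32];
	assert_noop_sketch(root, || Err::<(), _>("AmountLow"), "AmountLow");
}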
+ assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); } diff --git a/substrate/frame/vesting/src/weights.rs b/substrate/frame/vesting/src/weights.rs index 053453d757f383832f79c08e312c763c2295c563..d180e6828c59eee55682ad4b2aa0f4e28cd8f035 100644 --- a/substrate/frame/vesting/src/weights.rs +++ b/substrate/frame/vesting/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs index 4a8b49049e760adf71a9b39e3f432ab09c6029ff..bae7a40f863903d130b39410f282c0bf04dad563 100644 --- a/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -16,21 +16,25 @@ // limitations under the License. use crate::utils::{ - generate_crate_access, generate_hidden_includes, generate_runtime_mod_name_for_trait, - fold_fn_decl_for_client_side, extract_parameter_names_types_and_borrows, - generate_native_call_generator_fn_name, return_type_extract_type, - generate_method_runtime_api_impl_name, generate_call_api_at_fn_name, prefix_function_with_trait, - replace_wild_card_parameter_names, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, + generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, + generate_runtime_mod_name_for_trait, prefix_function_with_trait, + replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, parse::{Parse, ParseStream, Result, Error}, ReturnType, - fold::{self, Fold}, parse_quote, ItemTrait, Generics, GenericParam, Attribute, FnArg, Type, - visit::{Visit, self}, TraitBound, Meta, NestedMeta, Lit, TraitItem, Ident, TraitItemMethod, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + visit::{self, Visit}, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, + TraitBound, TraitItem, TraitItemMethod, Type, }; use std::collections::HashMap; @@ -59,9 +63,8 @@ const CHANGED_IN_ATTRIBUTE: &str = "changed_in"; /// Is used when a trait method was renamed. const RENAMED_ATTRIBUTE: &str = "renamed"; /// All attributes that we support in the declaration of a runtime api trait. -const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = &[ - CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE, -]; +const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = + &[CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, RENAMED_ATTRIBUTE]; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { @@ -94,14 +97,12 @@ fn extend_generics_with_block(generics: &mut Generics) { /// attribute body as `TokenStream`. 
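(The first `decl_runtime_apis.rs` hunk below reshapes `remove_supported_attributes`, which filters a `Vec<syn::Attribute>` in place while collecting the recognized ones. A minimal sketch of that `retain`-with-side-effect idiom over plain strings, with `starts_with` standing in for the real `path.is_ident` check; the attribute-name values are taken from the constants above, with "core_trait" and "api_version" assumed.)

use std::collections::HashMap;

// The four attribute names the macro recognizes.
const SUPPORTED: &[&str] = &["core_trait", "api_version", "changed_in", "renamed"];

fn remove_supported(attrs: &mut Vec<String>) -> HashMap<&'static str, String> {
	let mut result = HashMap::new();
	attrs.retain(|attr| match SUPPORTED.iter().copied().find(|name| attr.starts_with(*name)) {
		Some(name) => {
			// Move the recognized attribute into the map...
			result.insert(name, attr.clone());
			// ...and drop it from the original list.
			false
		},
		// Anything unrecognized stays where it is.
		None => true,
	});
	result
}

fn main() {
	let mut attrs = vec!["api_version(2)".to_owned(), "doc".to_owned()];
	let found = remove_supported(&mut attrs);
	assert!(found.contains_key("api_version"));
	assert_eq!(attrs, vec!["doc".to_owned()]);
}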
fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static str, Attribute> { let mut result = HashMap::new(); - attrs.retain(|v| { - match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { - Some(attribute) => { - result.insert(*attribute, v.clone()); - false - }, - None => true, - } + attrs.retain(|v| match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { + Some(attribute) => { + result.insert(*attribute, v.clone()); + false + }, + None => true, }); result @@ -226,16 +227,17 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { ) ) } else { - quote!( Ok(res) ) + quote!(Ok(res)) }; let input_names = params.iter().map(|v| &v.0); // If the type is using the block generic type, we will encode/decode it to make it // compatible. To ensure that we forward it by ref/value, we use the value given by the // the user. Otherwise if it is not using the block, we don't need to add anything. - let input_borrows = params - .iter() - .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { None }); + let input_borrows = + params + .iter() + .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { None }); // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect // all the function inputs. @@ -304,28 +306,23 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { ); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() > 2 && list.nested.is_empty() { err } else { let mut itr = list.nested.iter(); let old_name = match itr.next() { - Some(NestedMeta::Lit(Lit::Str(i))) => { - i.value() - }, + Some(NestedMeta::Lit(Lit::Str(i))) => i.value(), _ => return err, }; let version = match itr.next() { - Some(NestedMeta::Lit(Lit::Int(i))) => { - i.base10_parse()? - }, + Some(NestedMeta::Lit(Lit::Int(i))) => i.base10_parse()?, _ => return err, }; Ok((old_name, version)) - } - }, + }, _ => err, } } @@ -353,23 +350,19 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { fn_.span(), format!( "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, - CHANGED_IN_ATTRIBUTE - ) - )); + RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE + ), + )) } // We do not need to generate this function for a method that signature was changed. if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue; + continue } // Parse the renamed attributes. 
let mut renames = Vec::new(); - if let Some((_, a)) = attrs - .iter() - .find(|a| a.0 == &RENAMED_ATTRIBUTE) - { + if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { let (old_name, version) = parse_renamed_attribute(a)?; renames.push((version, prefix_function_with_trait(&trait_name, &old_name))); } @@ -381,7 +374,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { versions.push(version); old_names.push(old_name); (versions, old_names) - } + }, ); // Generate the generator function @@ -456,27 +449,32 @@ fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { extend_generics_with_block(&mut decl.generics); let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); let found_attributes = remove_supported_attributes(&mut decl.attrs); - let api_version = get_api_version(&found_attributes).map(|v| { - generate_runtime_api_version(v as u32) - })?; + let api_version = + get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; let id = generate_runtime_api_id(&decl.ident.to_string()); let call_api_at_calls = generate_call_api_at_calls(&decl)?; // Remove methods that have the `changed_in` attribute as they are not required for the // runtime anymore. - decl.items = decl.items.iter_mut().filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - } - r => Some(r.clone()), - }).collect(); + decl.items = decl + .items + .iter_mut() + .filter_map(|i| match i { + TraitItem::Method(ref mut method) => { + if remove_supported_attributes(&mut method.attrs) + .contains_key(CHANGED_IN_ATTRIBUTE) + { + None + } else { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + Some(TraitItem::Method(method.clone())) + } + }, + r => Some(r.clone()), + }) + .collect(); let native_call_generators = generate_native_call_generators(&decl)?; @@ -533,8 +531,10 @@ impl<'a> ToClientSideDecl<'a> { result } - fn fold_trait_item_method(&mut self, method: TraitItemMethod) - -> (TraitItemMethod, Option, TraitItemMethod) { + fn fold_trait_item_method( + &mut self, + method: TraitItemMethod, + ) -> (TraitItemMethod, Option, TraitItemMethod) { let crate_ = self.crate_; let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); let fn_impl = self.create_method_runtime_api_impl(method.clone()); @@ -547,8 +547,9 @@ impl<'a> ToClientSideDecl<'a> { fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { let crate_ = self.crate_; let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!( context )); - fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + fn_decl_ctx.sig.ident = + Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); fn_decl_ctx.sig.inputs.insert(2, context_arg); fn_decl_ctx @@ -556,9 +557,12 @@ impl<'a> ToClientSideDecl<'a> { /// Takes the given method and creates a `method_runtime_api_impl` method that will be /// implemented in the runtime for the client side. 
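(The `create_method_decl_with_context` change above boils down to deriving a `<name>_with_context` sibling identifier for each API method; a tiny sketch of that derivation, assuming the proc-macro2 and syn crates.)

use proc_macro2::Span;
use syn::Ident;

// Mirror of the `format!("{}_with_context", ...)` ident derivation above.
fn with_context_ident(base: &Ident) -> Ident {
	Ident::new(&format!("{}_with_context", base), Span::call_site())
}

fn main() {
	let base = Ident::new("best_block", Span::call_site());
	assert_eq!(with_context_ident(&base).to_string(), "best_block_with_context");
}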
- fn create_method_runtime_api_impl(&mut self, mut method: TraitItemMethod) -> Option { + fn create_method_runtime_api_impl( + &mut self, + mut method: TraitItemMethod, + ) -> Option { if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None; + return None } let fn_sig = &method.sig; @@ -566,36 +570,35 @@ impl<'a> ToClientSideDecl<'a> { // Get types and if the value is borrowed from all parameters. // If there is an error, we push it as the block to the user. - let param_types = match extract_parameter_names_types_and_borrows( - fn_sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; + let param_types = + match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { + Ok(res) => res + .into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + }, + }; let name = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); let block_id = self.block_id; let crate_ = self.crate_; - Some( - parse_quote!{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; - } - ) + Some(parse_quote! { + #[doc(hidden)] + fn #name( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; + }) } /// Takes the method declared by the user and creates the declaration we require for the runtime @@ -614,7 +617,7 @@ impl<'a> ToClientSideDecl<'a> { Err(e) => { self.errors.push(e.to_compile_error()); Vec::new() - } + }, }; let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); @@ -635,7 +638,8 @@ impl<'a> ToClientSideDecl<'a> { Error::new( method.span(), "`changed_in` version can not be greater than the `api_version`", - ).to_compile_error() + ) + .to_compile_error(), ); } @@ -646,49 +650,48 @@ impl<'a> ToClientSideDecl<'a> { method.sig.ident = ident; method.attrs.push(parse_quote!( #[deprecated] )); - let panic = format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!( panic!(#panic) ), quote!( None )) + let panic = + format!("Calling `{}` should not return a native value!", method.sig.ident); + (quote!(panic!(#panic)), quote!(None)) }, - Ok(None) => (quote!( Ok(n) ), quote!( Some(( #( #params2 ),* )) )), + Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), Err(e) => { self.errors.push(e.to_compile_error()); - (quote!( unimplemented!() ), quote!( None )) - } + (quote!(unimplemented!()), quote!(None)) + }, }; let function_name = method.sig.ident.to_string(); // Generate the default implementation that calls the `method_runtime_api_impl` method. - method.default = Some( - parse_quote! 
{ - { - let runtime_api_impl_params_encoded = - #crate_::Encode::encode(&( #( &#params ),* )); - - self.#name_impl( - __runtime_api_at_param__, - #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| - #crate_::ApiError::FailedToDecodeReturnValue { - function: #function_name, - error: err, - } - ) - } + method.default = Some(parse_quote! { + { + let runtime_api_impl_params_encoded = + #crate_::Encode::encode(&( #( &#params ),* )); + + self.#name_impl( + __runtime_api_at_param__, + #context, + #param_tuple, + runtime_api_impl_params_encoded, + ).and_then(|r| + match r { + #crate_::NativeOrEncoded::Native(n) => { + #native_handling + }, + #crate_::NativeOrEncoded::Encoded(r) => { + <#ret_type as #crate_::Decode>::decode(&mut &r[..]) + .map_err(|err| + #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) } - ) - } + } + ) } - ); + }); method } @@ -705,11 +708,7 @@ impl<'a> Fold for ToClientSideDecl<'a> { if is_core_trait { // Add all the supertraits we want to have for `Core`. - input.supertraits = parse_quote!( - 'static - + Send - + Sync - ); + input.supertraits = parse_quote!('static + Send + Sync); } else { // Add the `Core` runtime api as super trait. let crate_ = &self.crate_; @@ -729,24 +728,22 @@ fn parse_runtime_api_version(version: &Attribute) -> Result { let meta = version.parse_meta()?; let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ) - ) - ); + meta.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + )); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() != 1 { err } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { i.base10_parse() } else { err - } - }, + }, _ => err, } } @@ -798,14 +795,18 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { /// Get changed in version from the user given attribute or `Ok(None)`, if no attribute was given. fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { - found_attributes.get(&CHANGED_IN_ATTRIBUTE) + found_attributes + .get(&CHANGED_IN_ATTRIBUTE) .map(|v| parse_runtime_api_version(v).map(Some)) .unwrap_or(Ok(None)) } /// Get the api version from the user given attribute or `Ok(1)`, if no attribute was given. fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { - found_attributes.get(&API_VERSION_ATTRIBUTE).map(parse_runtime_api_version).unwrap_or(Ok(1)) + found_attributes + .get(&API_VERSION_ATTRIBUTE) + .map(parse_runtime_api_version) + .unwrap_or(Ok(1)) } /// Generate the declaration of the trait for the client side. @@ -863,7 +864,10 @@ impl CheckTraitDecl { /// Check that the given method declarations are correct. /// /// Any error is stored in `self.errors`. 
- fn check_method_declarations<'a>(&mut self, methods: impl Iterator) { + fn check_method_declarations<'a>( + &mut self, + methods: impl Iterator, + ) { let mut method_to_signature_changed = HashMap::>>::new(); methods.into_iter().for_each(|method| { @@ -871,7 +875,10 @@ impl CheckTraitDecl { let changed_in = match get_changed_in(&attributes) { Ok(r) => r, - Err(e) => { self.errors.push(e); return; }, + Err(e) => { + self.errors.push(e); + return + }, }; method_to_signature_changed @@ -912,16 +919,13 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_generic_param(&mut self, input: &'ast GenericParam) { match input { - GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ - `decl_runtime_apis!` macro!" - ) - ) - }, - _ => {} + GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ + `decl_runtime_apis!` macro!", + )), + _ => {}, } visit::visit_generic_param(self, input); @@ -930,14 +934,12 @@ impl<'ast> Visit<'ast> for CheckTraitDecl { fn visit_trait_bound(&mut self, input: &'ast TraitBound) { if let Some(last_ident) = input.path.segments.last().map(|v| &v.ident) { if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ `decl_runtime_apis!` macro! If you try to use a different trait than the \ - substrate `Block` trait, please rename it locally." - ) - ) + substrate `Block` trait, please rename it locally.", + )) } } @@ -965,7 +967,9 @@ pub fn decl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::Tok // Parse all trait declarations let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); - decl_runtime_apis_impl_inner(&api_decls).unwrap_or_else(|e| e.to_compile_error()).into() + decl_runtime_apis_impl_inner(&api_decls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result { @@ -975,13 +979,11 @@ fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result let runtime_decls = generate_runtime_decls(api_decls)?; let client_side_decls = generate_client_side_decls(api_decls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #runtime_decls + #runtime_decls - #client_side_decls - ) - ) + #client_side_decls + )) } diff --git a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs index e81c52bbb0b18424c783856f10f21f78d568f4a6..bc0f027e1efaa07d1dc6c996d97ac21a7708614a 100644 --- a/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -16,12 +16,12 @@ // limitations under the License. 
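(The `CheckTraitDecl` visitor above rejects a hand-written `Block` generic because `decl_runtime_apis!` injects its own. A reduced sketch of that validation, assuming syn 1.x with the "full" and "visit" features enabled.)

use syn::{
	parse_quote,
	spanned::Spanned,
	visit::{self, Visit},
	GenericParam, ItemTrait,
};

#[derive(Default)]
struct RejectBlockGeneric {
	errors: Vec<syn::Error>,
}

impl<'ast> Visit<'ast> for RejectBlockGeneric {
	fn visit_generic_param(&mut self, input: &'ast GenericParam) {
		// Same shape as `CheckTraitDecl::visit_generic_param` above.
		if let GenericParam::Type(ty) = input {
			if ty.ident == "Block" {
				self.errors.push(syn::Error::new(
					input.span(),
					"`Block: BlockT` is added automatically; do not declare it",
				));
			}
		}
		visit::visit_generic_param(self, input);
	}
}

fn main() {
	let decl: ItemTrait = parse_quote! {
		pub trait MyApi<Block> {
			fn best_number(&self) -> u32;
		}
	};
	let mut check = RejectBlockGeneric::default();
	check.visit_item_trait(&decl);
	assert_eq!(check.errors.len(), 1);
}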
use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_runtime_mod_name_for_trait, generate_method_runtime_api_impl_name, - extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, - return_type_extract_type, generate_call_api_at_fn_name, prefix_function_with_trait, extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, + generate_hidden_includes, generate_method_runtime_api_impl_name, + generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, + prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -29,9 +29,12 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, Path, Signature, Attribute, - ImplItem, parse::{Parse, ParseStream, Result, Error}, PathArguments, GenericArgument, TypePath, - fold::{self, Fold}, parse_quote, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, + TypePath, }; use std::collections::HashSet; @@ -66,9 +69,10 @@ fn generate_impl_call( signature: &Signature, runtime: &Type, input: &Ident, - impl_trait: &Path + impl_trait: &Path, ) -> Result<TokenStream> { - let params = extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; + let params = + extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; let c = generate_crate_access(HIDDEN_INCLUDES_ID); let fn_name = &signature.ident; @@ -78,27 +82,25 @@ fn generate_impl_call( let ptypes = params.iter().map(|v| &v.1); let pborrow = params.iter().map(|v| &v.2); - Ok( - quote!( - let (#( #pnames ),*) : ( #( #ptypes ),* ) = - match #c::DecodeLimit::decode_all_with_depth_limit( - #c::MAX_EXTRINSIC_DEPTH, - &#input, - ) { - Ok(res) => res, - Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e), - }; - - #[allow(deprecated)] - <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) - ) - ) + Ok(quote!( + let (#( #pnames ),*) : ( #( #ptypes ),* ) = + match #c::DecodeLimit::decode_all_with_depth_limit( + #c::MAX_EXTRINSIC_DEPTH, + &#input, + ) { + Ok(res) => res, + Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e), + }; + + #[allow(deprecated)] + <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) + )) } /// Generate all the implementation calls for the given functions. fn generate_impl_calls( impls: &[ItemImpl], - input: &Ident + input: &Ident, ) -> Result<Vec<(Ident, Ident, TokenStream, Vec<Attribute>)>> { let mut impl_calls = Vec::new(); @@ -113,12 +115,8 @@ fn generate_impl_calls( for item in &impl_.items { if let ImplItem::Method(method) = item { - let impl_call = generate_impl_call( - &method.sig, - &impl_.self_ty, - input, - &impl_trait - )?; + let impl_call = + generate_impl_call(&method.sig, &impl_.self_ty, input, &impl_trait)?; impl_calls.push(( impl_trait_ident.clone(), @@ -137,15 +135,16 @@ fn generate_impl_calls( fn generate_dispatch_function(impls: &[ItemImpl]) -> Result<TokenStream> { let data = Ident::new("__sp_api__input_data", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &data)?
- .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let name = prefix_function_with_trait(&trait_, &fn_name); - quote!( - #( #attrs )* - #name => Some(#c::Encode::encode(&{ #impl_ })), - ) - }); + let impl_calls = + generate_impl_calls(impls, &data)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let name = prefix_function_with_trait(&trait_, &fn_name); + quote!( + #( #attrs )* + #name => Some(#c::Encode::encode(&{ #impl_ })), + ) + }); Ok(quote!( #[cfg(feature = "std")] @@ -163,34 +162,33 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result<TokenStream> { let input = Ident::new("input", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &input)? - .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let fn_name = Ident::new( - &prefix_function_with_trait(&trait_, &fn_name), - Span::call_site() - ); - - quote!( - #( #attrs )* - #[cfg(not(feature = "std"))] - #[no_mangle] - pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - #c::slice::from_raw_parts(input_data, input_len) - } - }; - - #c::init_runtime_logger(); - - let output = (move || { #impl_ })(); - #c::to_substrate_wasm_fn_return_value(&output) - } - ) - }); + let impl_calls = + generate_impl_calls(impls, &input)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let fn_name = + Ident::new(&prefix_function_with_trait(&trait_, &fn_name), Span::call_site()); + + quote!( + #( #attrs )* + #[cfg(not(feature = "std"))] + #[no_mangle] + pub unsafe fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + #c::slice::from_raw_parts(input_data, input_len) + } + }; + + #c::init_runtime_logger(); + + let output = (move || { #impl_ })(); + #c::to_substrate_wasm_fn_return_value(&output) + } + ) + }); Ok(quote!( #( #impl_calls )* )) } @@ -414,7 +412,6 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result<TokenStream> { Ok(quote!( #( #impls_prepared )* )) } - /// Auxiliary data structure that is used to convert `impl Api for Runtime` to /// `impl Api for RuntimeApi`.
/// This requires us to replace the runtime `Block` with the node `Block`, @@ -430,11 +427,8 @@ struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = if input == *self.runtime_block { - parse_quote!( __SR_API_BLOCK__ ) - } else { - input - }; + let new_ty_path = + if input == *self.runtime_block { parse_quote!(__SR_API_BLOCK__) } else { input }; fold::fold_type_path(self, new_ty_path) } @@ -451,12 +445,18 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { // Generate the access to the native parameters let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![ quote!( p ) ] + vec![quote!(p)] } else { - input.sig.inputs.iter().enumerate().map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }).collect::<Vec<_>>() + input + .sig + .inputs + .iter() + .enumerate() + .map(|(i, _)| { + let i = syn::Index::from(i); + quote!( p.#i ) + }) + .collect::<Vec<_>>() }; let (param_types, error) = match extract_parameter_names_types_and_borrows( @@ -464,12 +464,14 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { AllowSelfRefInParameters::No, ) { Ok(res) => ( - res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::<Vec<_>>(), - None + res.into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::<Vec<_>>(), + None, ), Err(e) => (Vec::new(), Some(e.to_compile_error())), }; @@ -483,10 +485,8 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { params_encoded: Vec<u8>, }; - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); let ret_type = return_type_extract_type(&input.sig.output); // Generate the correct return type. @@ -544,43 +544,34 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); // Implement the trait for the `RuntimeApiImpl` - input.self_ty = Box::new( - parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> ) - ); + input.self_ty = + Box::new(parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> )); + input.generics.params.push(parse_quote!( + __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + + std::panic::RefUnwindSafe + )); input.generics.params.push( - parse_quote!( - __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + - std::panic::RefUnwindSafe - ) - ); - input.generics.params.push( - parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ) + parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ), ); let where_clause = input.generics.make_where_clause(); - where_clause.predicates.push( - parse_quote! { - RuntimeApiImplCall::StateBackend: - #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> - } - ); + where_clause.predicates.push(parse_quote! { + RuntimeApiImplCall::StateBackend: + #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> + }); // Require that all types used in the function signatures are unwind safe. extract_all_signature_types(&input.items).iter().for_each(|i| { - where_clause.predicates.push( - parse_quote! { - #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); + where_clause.predicates.push(parse_quote!
{ + #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); }); - where_clause.predicates.push( - parse_quote! { - __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); + where_clause.predicates.push(parse_quote! { + __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); input.attrs = filter_cfg_attrs(&input.attrs); @@ -650,14 +641,12 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let span = trait_.span(); if !processed_traits.insert(trait_) { - return Err( - Error::new( - span, - "Two traits with the same name detected! \ + return Err(Error::new( + span, + "Two traits with the same name detected! \ The trait name is used to generate its ID. \ - Please rename one trait at the declaration!" - ) - ) + Please rename one trait at the declaration!", + )) } let id: Path = parse_quote!( #path ID ); @@ -692,7 +681,9 @@ pub fn impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::Tok // Parse all impl blocks let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { @@ -704,27 +695,25 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let wasm_interface = generate_wasm_interface(api_impls)?; let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #base_runtime_api + #base_runtime_api - #api_impls_for_runtime + #api_impls_for_runtime - #api_impls_for_runtime_api + #api_impls_for_runtime_api - #runtime_api_versions + #runtime_api_versions - pub mod api { - use super::*; + pub mod api { + use super::*; - #dispatch_impl + #dispatch_impl - #wasm_interface - } - ) - ) + #wasm_interface + } + )) } // Filters all attributes except the cfg ones. diff --git a/substrate/primitives/api/proc-macro/src/lib.rs b/substrate/primitives/api/proc-macro/src/lib.rs index 30767efd41c114b9a74116f5f906644ec89a9c96..b8731d70ca3cf0ee70c5d08fce520ef8c6d348af 100644 --- a/substrate/primitives/api/proc-macro/src/lib.rs +++ b/substrate/primitives/api/proc-macro/src/lib.rs @@ -21,9 +21,9 @@ use proc_macro::TokenStream; +mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; -mod decl_runtime_apis; mod utils; #[proc_macro] diff --git a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 738420615b622719f9eefbe36e4562852ee0c923..77f8a07f85c48174b5f0e61e9755a4e491dedbe6 100644 --- a/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/substrate/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -16,10 +16,10 @@ // limitations under the License. 
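For orientation, the `generate_wasm_interface` hunk above emits one `#[no_mangle]` export per runtime API method. Roughly, and with a hypothetical method name (the real item is assembled by `quote!`), each generated export has this shape:

// Sketch only: the host hands over a pointer/length pair for the SCALE-encoded
// arguments; the function returns a u64 that packs the pointer and length of
// the encoded result (the job of `to_substrate_wasm_fn_return_value`).
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe fn Core_version(input_data: *mut u8, input_len: usize) -> u64 {
    let _input: &[u8] = if input_len == 0 {
        &[0u8; 0]
    } else {
        core::slice::from_raw_parts(input_data, input_len)
    };
    // ... decode `_input`, call the implementation, SCALE-encode the output,
    // then pack the result pointer and length into the returned u64 ...
    0 // placeholder so the sketch compiles
}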
use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, extract_parameter_names_types_and_borrows, - return_type_extract_type, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -27,8 +27,11 @@ use proc_macro2::{Span, TokenStream}; use quote::{quote, quote_spanned}; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, TypePath, parse_quote, - parse::{Parse, ParseStream, Result, Error}, fold::{self, Fold}, Attribute, Pat, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Attribute, Ident, ItemImpl, Pat, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. @@ -62,10 +65,7 @@ impl Parse for RuntimeApiImpls { } /// Implement the `ApiExt` trait and the `Core` runtime api. -fn implement_common_api_traits( - block_type: TypePath, - self_ty: Type, -) -> Result<TokenStream> { +fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result<TokenStream> { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); Ok(quote!( @@ -168,11 +168,13 @@ fn implement_common_api_traits( /// If the attribute was found, it will be automatically removed from the vec. fn has_advanced_attribute(attributes: &mut Vec<Attribute>) -> bool { let mut found = false; - attributes.retain(|attr| if attr.path.is_ident(ADVANCED_ATTRIBUTE) { - found = true; - false - } else { - true + attributes.retain(|attr| { + if attr.path.is_ident(ADVANCED_ATTRIBUTE) { + found = true; + false + } else { + true + } }); found @@ -214,7 +216,7 @@ fn get_at_param_name( let name = param_names.remove(0); Ok((quote!( #name ), ptype_and_borrows.0)) } else { - Ok((quote!( _ ), default_block_id_type.clone())) + Ok((quote!(_), default_block_id_type.clone())) } } @@ -235,24 +237,27 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { let is_advanced = has_advanced_attribute(&mut input.attrs); let mut errors = Vec::new(); - let (mut param_names, mut param_types_and_borrows) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::YesButIgnore, - ) { - Ok(res) => ( - res.iter().map(|v| v.0.clone()).collect::<Vec<_>>(), - res.iter().map(|v| { - let ty = &v.1; - let borrow = &v.2; - (quote_spanned!(ty.span() => #borrow #ty ), v.2.is_some()) - }).collect::<Vec<_>>(), - ), - Err(e) => { - errors.push(e.to_compile_error()); - - (Default::default(), Default::default()) - } - }; + let (mut param_names, mut param_types_and_borrows) = + match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::YesButIgnore, + ) { + Ok(res) => ( + res.iter().map(|v| v.0.clone()).collect::<Vec<_>>(), + res.iter() + .map(|v| { + let ty = &v.1; + let borrow = &v.2; + (quote_spanned!(ty.span() => #borrow #ty ), v.2.is_some()) + }) + .collect::<Vec<_>>(), + ), + Err(e) => { + errors.push(e.to_compile_error()); + + (Default::default(), Default::default()) + }, + }; let block_type = &self.block_type; let block_id_type = quote!( &#crate_::BlockId<#block_type> ); @@ -267,8 +272,8 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { Ok(res) => res, Err(e) => { errors.push(e.to_compile_error()); - (quote!( _ ), block_id_type) -
} + (quote!(_), block_id_type) + }, }; let param_types = param_types_and_borrows.iter().map(|v| &v.0); @@ -281,10 +286,8 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { _: Vec, }; - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); // When using advanced, the user needs to declare the correct return type on its own, // otherwise do it for the user. @@ -360,28 +363,24 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { + Some(self_ty) => if self_ty == impl_.self_ty { Some(self_ty) } else { - let mut error =Error::new( + let mut error = Error::new( impl_.self_ty.span(), "Self type should not change between runtime apis", ); - error.combine(Error::new( - self_ty.span(), - "First self type found here", - )); + error.combine(Error::new(self_ty.span(), "First self type found here")); return Err(error) - } - }, + }, None => Some(impl_.self_ty.clone()), }; global_block_type = match global_block_type.take() { - Some(global_block_type) => { + Some(global_block_type) => if global_block_type == *block_type { Some(global_block_type) } else { @@ -396,15 +395,11 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Some(block_type.clone()), }; - let mut visitor = FoldRuntimeApiImpl { - block_type, - impl_trait: &impl_trait.ident, - }; + let mut visitor = FoldRuntimeApiImpl { block_type, impl_trait: &impl_trait.ident }; result.push(visitor.fold_item_impl(impl_.clone())); } @@ -421,7 +416,9 @@ pub fn mock_impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro // Parse all impl blocks let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - mock_impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + mock_impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn mock_impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { diff --git a/substrate/primitives/api/proc-macro/src/utils.rs b/substrate/primitives/api/proc-macro/src/utils.rs index aa3c69d46a29dd7ab40d012e69a922203de386fb..a3f21638751e9bcccb693f452779be96109e7545 100644 --- a/substrate/primitives/api/proc-macro/src/utils.rs +++ b/substrate/primitives/api/proc-macro/src/utils.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Result, Ident, Signature, parse_quote, Type, Pat, spanned::Spanned, FnArg, Error, token::And, - ImplItem, ReturnType, PathArguments, Path, GenericArgument, TypePath, ItemImpl, + parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, + ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; use quote::quote; @@ -49,18 +49,19 @@ pub fn generate_hidden_includes(unique_id: &'static str) -> TokenStream { Err(e) => { let err = Error::new(Span::call_site(), e).to_compile_error(); quote!( #err ) - } + }, } } /// Generates the access to the `sc_client` crate. 
pub fn generate_crate_access(unique_id: &'static str) -> TokenStream { if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { - quote!( sp_api ) + quote!(sp_api) } else { let mod_name = generate_hidden_includes_mod_name(unique_id); quote!( self::#mod_name::sp_api ) - }.into() + } + .into() } /// Generates the name of the module that contains the trait declaration for the runtime. @@ -76,7 +77,7 @@ pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { match rt { - ReturnType::Default => parse_quote!( () ), + ReturnType::Default => parse_quote!(()), ReturnType::Type(_, ref ty) => *ty.clone(), } } @@ -84,10 +85,13 @@ pub fn return_type_extract_type(rt: &ReturnType) -> Type { /// Replace the `_` (wild card) parameter names in the given signature with unique identifiers. pub fn replace_wild_card_parameter_names(input: &mut Signature) { let mut generated_pattern_counter = 0; - input.inputs.iter_mut().for_each(|arg| if let FnArg::Typed(arg) = arg { - arg.pat = Box::new( - generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter), - ); + input.inputs.iter_mut().for_each(|arg| { + if let FnArg::Typed(arg) = arg { + arg.pat = Box::new(generate_unique_pattern( + (*arg.pat).clone(), + &mut generated_pattern_counter, + )); + } }); } @@ -101,7 +105,7 @@ pub fn fold_fn_decl_for_client_side( // Add `&self, at:& BlockId` as parameters to each function at the beginning. input.inputs.insert(0, parse_quote!( __runtime_api_at_param__: &#block_id )); - input.inputs.insert(0, parse_quote!( &self )); + input.inputs.insert(0, parse_quote!(&self)); // Wrap the output in a `Result` input.output = { @@ -114,10 +118,8 @@ pub fn fold_fn_decl_for_client_side( pub fn generate_unique_pattern(pat: Pat, counter: &mut u32) -> Pat { match pat { Pat::Wild(_) => { - let generated_name = Ident::new( - &format!("__runtime_api_generated_name_{}__", counter), - pat.span(), - ); + let generated_name = + Ident::new(&format!("__runtime_api_generated_name_{}__", counter), pat.span()); *counter += 1; parse_quote!( #generated_name ) @@ -145,26 +147,20 @@ pub fn extract_parameter_names_types_and_borrows( match input { FnArg::Typed(arg) => { let (ty, borrow) = match &*arg.ty { - Type::Reference(t) => { - ((*t.elem).clone(), Some(t.and_token)) - }, - t => { (t.clone(), None) }, + Type::Reference(t) => ((*t.elem).clone(), Some(t.and_token)), + t => (t.clone(), None), }; - let name = generate_unique_pattern( - (*arg.pat).clone(), - &mut generated_pattern_counter, - ); + let name = + generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); result.push((name, ty, borrow)); }, - FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { - return Err(Error::new(input.span(), "`self` parameter not supported!")) - }, - FnArg::Receiver(recv) => { + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => + return Err(Error::new(input.span(), "`self` parameter not supported!")), + FnArg::Receiver(recv) => if recv.mutability.is_some() || recv.reference.is_none() { return Err(Error::new(recv.span(), "Only `&self` is supported!")) - } - }, + }, } } @@ -190,7 +186,8 @@ pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> /// /// If a type is a reference, the inner type is extracted (without the reference). 
pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { - items.iter() + items + .iter() .filter_map(|i| match i { ImplItem::Method(method) => Some(&method.sig), _ => None, @@ -201,13 +198,17 @@ pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { ReturnType::Type(_, ty) => Some((**ty).clone()), }; - sig.inputs.iter().filter_map(|i| match i { - FnArg::Typed(arg) => Some(&arg.ty), - _ => None, - }).map(|ty| match &**ty { - Type::Reference(t) => (*t.elem).clone(), - _ => (**ty).clone(), - }).chain(ret_ty) + sig.inputs + .iter() + .filter_map(|i| match i { + FnArg::Typed(arg) => Some(&arg.ty), + _ => None, + }) + .map(|ty| match &**ty { + Type::Reference(t) => (*t.elem).clone(), + _ => (**ty).clone(), + }) + .chain(ret_ty) }) .collect() } @@ -223,19 +224,20 @@ pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { .ok_or_else(|| Error::new(span, "Empty path not supported"))?; match &generics.arguments { - PathArguments::AngleBracketed(ref args) => { - args.args.first().and_then(|v| match v { + PathArguments::AngleBracketed(ref args) => args + .args + .first() + .and_then(|v| match v { GenericArgument::Type(Type::Path(ref block)) => Some(block), - _ => None - }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) - }, + _ => None, + }) + .ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")), PathArguments::None => { let span = trait_.segments.last().as_ref().unwrap().span(); Err(Error::new(span, "Missing `Block` generic parameter.")) }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - }, + PathArguments::Parenthesized(_) => + Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")), } } @@ -252,19 +254,20 @@ pub fn extract_impl_trait<'a>( impl_: &'a ItemImpl, require: RequireQualifiedTraitPath, ) -> Result<&'a Path> { - impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( - || Error::new(impl_.span(), "Only implementation of traits are supported!") - ).and_then(|p| { - if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { - Ok(p) - } else { - Err( - Error::new( + impl_ + .trait_ + .as_ref() + .map(|v| &v.1) + .ok_or_else(|| Error::new(impl_.span(), "Only implementation of traits are supported!")) + .and_then(|p| { + if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { + Ok(p) + } else { + Err(Error::new( p.span(), "The implemented trait has to be referenced with a path, \ - e.g. `impl client::Core for Runtime`." - ) - ) - } - }) + e.g. 
`impl client::Core for Runtime`.", + )) + } + }) } diff --git a/substrate/primitives/api/src/lib.rs b/substrate/primitives/api/src/lib.rs index ea023677adf344c1515f9f9000257ac79e576a32..0ec1c5aeadbbeab2d86a2f63a18efbe69642859e 100644 --- a/substrate/primitives/api/src/lib.rs +++ b/substrate/primitives/api/src/lib.rs @@ -70,13 +70,7 @@ extern crate self as sp_api; #[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, InMemoryBackend, -}; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; +pub use codec::{self, Decode, DecodeLimit, Encode}; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; @@ -84,27 +78,34 @@ pub use hash_db::Hasher; #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; #[doc(hidden)] +#[cfg(feature = "std")] +pub use sp_core::NativeOrEncoded; +use sp_core::OpaqueMetadata; +#[doc(hidden)] +pub use sp_core::{offchain, ExecutionContext}; +#[doc(hidden)] pub use sp_runtime::{ + generic::BlockId, traits::{ - Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HashFor, NumberFor, - Header as HeaderT, Hash as HashT, + Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, Hash as HashT, HashFor, + Header as HeaderT, NumberFor, }, - generic::BlockId, transaction_validity::TransactionValidity, RuntimeString, TransactionOutcome, + transaction_validity::TransactionValidity, + RuntimeString, TransactionOutcome, }; #[doc(hidden)] -pub use sp_core::{offchain, ExecutionContext}; -#[doc(hidden)] -pub use sp_version::{ApiId, RuntimeVersion, ApisVec, create_apis_vec}; -#[doc(hidden)] -pub use sp_std::{slice, mem}; +#[cfg(feature = "std")] +pub use sp_state_machine::{ + Backend as StateBackend, ChangesTrieState, InMemoryBackend, OverlayedChanges, StorageProof, +}; #[cfg(feature = "std")] use sp_std::result; #[doc(hidden)] -pub use codec::{Encode, Decode, DecodeLimit, self}; -use sp_core::OpaqueMetadata; +pub use sp_std::{mem, slice}; +#[doc(hidden)] +pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] -use std::{panic::UnwindSafe, cell::RefCell}; - +use std::{cell::RefCell, panic::UnwindSafe}; /// Maximum nesting level for extrinsics. pub const MAX_EXTRINSIC_DEPTH: u32 = 256; @@ -386,18 +387,18 @@ pub type ProofRecorder<B> = sp_state_machine::ProofRecorder<<B as BlockT>::Hash> /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] -pub type StorageTransactionCache<Block, Backend> = - sp_state_machine::StorageTransactionCache< - <Backend as StateBackend<HashFor<Block>>>::Transaction, HashFor<Block>, NumberFor<Block> - >; +pub type StorageTransactionCache<Block, Backend> = sp_state_machine::StorageTransactionCache< + <Backend as StateBackend<HashFor<Block>>>::Transaction, + HashFor<Block>, + NumberFor<Block>, +>; #[cfg(feature = "std")] -pub type StorageChanges<SBackend, Block> = - sp_state_machine::StorageChanges< - <SBackend as StateBackend<HashFor<Block>>>::Transaction, - HashFor<Block>, - NumberFor<Block> - >; +pub type StorageChanges<SBackend, Block> = sp_state_machine::StorageChanges< + <SBackend as StateBackend<HashFor<Block>>>::Transaction, + HashFor<Block>, + NumberFor<Block>, +>; /// Extract the state backend type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] @@ -463,29 +464,31 @@ pub trait ApiExt { /// Depending on the outcome of the closure, the transaction is committed or rolled-back. /// /// The internal result of the closure is returned afterwards. - fn execute_in_transaction<F: FnOnce(&Self) -> TransactionOutcome<R>, R>( - &self, - call: F, - ) -> R where Self: Sized; + fn execute_in_transaction<F: FnOnce(&Self) -> TransactionOutcome<R>, R>(&self, call: F) -> R + where + Self: Sized; /// Checks if the given api is implemented and versions match.
- fn has_api<A: RuntimeApiInfo + ?Sized>( - &self, - at: &BlockId<Block>, - ) -> Result<bool, ApiError> where Self: Sized; + fn has_api<A: RuntimeApiInfo + ?Sized>(&self, at: &BlockId<Block>) -> Result<bool, ApiError> + where + Self: Sized; /// Check if the given api is implemented and the version passes a predicate. fn has_api_with<A: RuntimeApiInfo + ?Sized, P: Fn(u32) -> bool>( &self, at: &BlockId<Block>, pred: P, - ) -> Result<bool, ApiError> where Self: Sized; + ) -> Result<bool, ApiError> + where + Self: Sized; /// Returns the version of the given api. fn api_version<A: RuntimeApiInfo + ?Sized>( &self, at: &BlockId<Block>, - ) -> Result<Option<u32>, ApiError> where Self: Sized; + ) -> Result<Option<u32>, ApiError> + where + Self: Sized; /// Start recording all accessed trie nodes for generating proofs. fn record_proof(&mut self); @@ -509,10 +512,9 @@ pub trait ApiExt { backend: &Self::StateBackend, changes_trie_state: Option<&ChangesTrieState<HashFor<Block>, NumberFor<Block>>>, parent_hash: Block::Hash, - ) -> Result< - StorageChanges<Self::StateBackend, Block>, - String - > where Self: Sized; + ) -> Result<StorageChanges<Self::StateBackend, Block>, String> + where + Self: Sized; } /// Parameters for [`CallApiAt::call_api_at`]. @@ -557,10 +559,7 @@ pub trait CallApiAt { ) -> Result<NativeOrEncoded<R>, ApiError>; /// Returns the runtime version at the given block. - fn runtime_version_at( - &self, - at: &BlockId<Block>, - ) -> Result<RuntimeVersion, ApiError>; + fn runtime_version_at(&self, at: &BlockId<Block>) -> Result<RuntimeVersion, ApiError>; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. diff --git a/substrate/primitives/api/test/benches/bench.rs b/substrate/primitives/api/test/benches/bench.rs index 20ddbbe7116dc1ca863ec43825dfba55c4a3a170..b3d96a2db6a5680fa714907c54a0fb7c1b2fb9cb 100644 --- a/substrate/primitives/api/test/benches/bench.rs +++ b/substrate/primitives/api/test/benches/bench.rs @@ -15,14 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use criterion::{Criterion, criterion_group, criterion_main}; -use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, TestClientBuilder, - TestClientBuilderExt, runtime::TestAPI, -}; +use criterion::{criterion_group, criterion_main, Criterion}; +use sp_api::ProvideRuntimeApi; use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; -use sp_api::ProvideRuntimeApi; +use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, +}; fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { @@ -58,13 +57,17 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("calling function by function pointer in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) }); c.bench_function("calling function in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_direct_call(&block_id).unwrap()) }); diff --git a/substrate/primitives/api/test/tests/decl_and_impl.rs b/substrate/primitives/api/test/tests/decl_and_impl.rs index 54fb37133f4680a38faad872de0dd917a4e9341c..5eeb2a6a771eddd2040c77b32f80aa745d121096 100644 --- a/substrate/primitives/api/test/tests/decl_and_impl.rs +++ b/substrate/primitives/api/test/tests/decl_and_impl.rs @@ -16,12
+16,13 @@ // limitations under the License. use sp_api::{ - RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, - ApiError, - ApiExt, + decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiError, ApiExt, RuntimeApiInfo, }; -use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; use sp_core::NativeOrEncoded; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, GetNodeBlockType}, +}; use substrate_test_runtime_client::runtime::Block; /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` @@ -142,16 +143,22 @@ type TestClient = substrate_test_runtime_client::client::Client< #[test] fn test_client_side_function_signature() { - let _test: fn(&RuntimeApiImpl<Block, TestClient>, &BlockId<Block>, u64) -> Result<(), ApiError> = - RuntimeApiImpl::<Block, TestClient>::test; - let _something_with_block: - fn(&RuntimeApiImpl<Block, TestClient>, &BlockId<Block>, Block) -> Result<Block, ApiError> = - RuntimeApiImpl::<Block, TestClient>::something_with_block; + let _test: fn( + &RuntimeApiImpl<Block, TestClient>, + &BlockId<Block>, + u64, + ) -> Result<(), ApiError> = RuntimeApiImpl::<Block, TestClient>::test; + let _something_with_block: fn( + &RuntimeApiImpl<Block, TestClient>, + &BlockId<Block>, + Block, + ) -> Result<Block, ApiError> = RuntimeApiImpl::<Block, TestClient>::something_with_block; #[allow(deprecated)] - let _same_name_before_version_2: - fn(&RuntimeApiImpl<Block, TestClient>, &BlockId<Block>) -> Result<String, ApiError> = - RuntimeApiImpl::<Block, TestClient>::same_name_before_version_2; + let _same_name_before_version_2: fn( + &RuntimeApiImpl<Block, TestClient>, + &BlockId<Block>, + ) -> Result<String, ApiError> = RuntimeApiImpl::<Block, TestClient>::same_name_before_version_2; } #[test] @@ -186,9 +193,7 @@ fn check_runtime_api_versions() { fn mock_runtime_api_has_api() { let mock = MockApi { block: None }; - assert!( - mock.has_api::<dyn ApiWithCustomVersion<Block>>(&BlockId::Number(0)).unwrap(), - ); + assert!(mock.has_api::<dyn ApiWithCustomVersion<Block>>(&BlockId::Number(0)).unwrap(),); assert!(mock.has_api::<dyn Api<Block>>(&BlockId::Number(0)).unwrap()); } diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index b60c7a09cb616be3b2980ee7be1a2f0735728e1e..b0b14ec1e944e2f95e975ee9af78a91df1f7218b 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -15,21 +15,23 @@ // See the License for the specific language governing permissions and // limitations under the License.
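The `test_client_side_function_signature` rewrite above relies on a compile-time trick worth spelling out: coercing a method to an explicit `fn` pointer type pins the macro-generated signature. A standalone toy (not Substrate code) of the same idea:

// If the method's signature ever drifts, the coercion below stops compiling.
struct Api;

impl Api {
    fn test(&self, _at: &u64, _data: u64) -> Result<(), String> {
        Ok(())
    }
}

fn main() {
    // Fails to compile if `Api::test` stops matching this exact type.
    let _check: fn(&Api, &u64, u64) -> Result<(), String> = Api::test;
}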
-use sp_api::{ProvideRuntimeApi, Core}; +use sp_api::{Core, ProvideRuntimeApi}; +use sp_runtime::{ + generic::BlockId, + traits::{HashFor, Header as HeaderT}, +}; +use sp_state_machine::{ + create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy, +}; use substrate_test_runtime_client::{ prelude::*, + runtime::{Block, DecodeFails, Header, TestAPI, Transfer}, DefaultTestClientBuilderExt, TestClientBuilder, - runtime::{TestAPI, DecodeFails, Transfer, Block, Header}, -}; -use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; -use sp_state_machine::{ - ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, }; -use sp_consensus::SelectChain; use codec::Encode; use sc_block_builder::BlockBuilderProvider; +use sp_consensus::SelectChain; fn calling_function_with_strat(strat: ExecutionStrategy) { let client = TestClientBuilder::new().set_execution_strategy(strat).build(); @@ -52,7 +54,9 @@ fn calling_wasm_runtime_function() { #[test] #[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] fn calling_native_runtime_function_with_non_decodable_parameter() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.fail_convert_parameter(&block_id, DecodeFails::new()).unwrap(); @@ -61,7 +65,9 @@ fn calling_native_runtime_function_with_non_decodable_parameter() { #[test] #[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); runtime_api.fail_convert_return_value(&block_id).unwrap(); @@ -69,7 +75,9 @@ fn calling_native_runtime_function_with_non_decodable_return_value() { #[test] fn calling_native_runtime_signature_changed_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -78,7 +86,9 @@ fn calling_native_runtime_signature_changed_function() { #[test] fn calling_wasm_runtime_signature_changed_old_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -103,10 +113,11 @@ fn calling_with_both_strategy_and_fail_on_native_should_work() { assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); } - #[test] fn calling_with_native_else_wasm_and_fail_on_wasm_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); + let client = 
TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1); @@ -114,7 +125,9 @@ fn calling_with_native_else_wasm_and_fail_on_wasm_should_work() { #[test] fn calling_with_native_else_wasm_and_fail_on_native_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); @@ -122,7 +135,9 @@ fn calling_with_native_else_wasm_and_fail_on_native_should_work() { #[test] fn use_trie_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); assert_eq!(runtime_api.use_trie(&block_id).unwrap(), 2); @@ -133,10 +148,18 @@ fn initialize_block_works() { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.initialize_block( - &block_id, - &Header::new(1, Default::default(), Default::default(), Default::default(), Default::default()), - ).unwrap(); + runtime_api + .initialize_block( + &block_id, + &Header::new( + 1, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + ) + .unwrap(); assert_eq!(runtime_api.get_block_number(&block_id).unwrap(), 1); } @@ -165,7 +188,8 @@ fn record_proof_works() { nonce: 0, from: AccountKeyring::Alice.into(), to: Default::default(), - }.into_signed_tx(); + } + .into_signed_tx(); // Build the block and record proof let mut builder = client @@ -177,15 +201,12 @@ fn record_proof_works() { let backend = create_proof_check_backend::>( storage_root, proof.expect("Proof was generated"), - ).expect("Creates proof backend."); + ) + .expect("Creates proof backend."); // Use the proof backend to execute `execute_block`. 
let mut overlay = Default::default(); - let executor = NativeExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - ); + let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8); execution_proof_check_on_trie_backend::<_, u64, _, _>( &backend, &mut overlay, @@ -194,7 +215,8 @@ fn record_proof_works() { "Core_execute_block", &block.encode(), &runtime_code, - ).expect("Executes block while using the proof backend"); + ) + .expect("Executes block while using the proof backend"); } #[test] @@ -203,7 +225,8 @@ fn call_runtime_api_with_multiple_arguments() { let data = vec![1, 2, 4, 5, 6, 7, 8, 8, 10, 12]; let block_id = BlockId::Number(client.chain_info().best_number); - client.runtime_api() + client + .runtime_api() .test_multiple_arguments(&block_id, data.clone(), data.clone(), data.len() as u32) .unwrap(); } @@ -213,8 +236,8 @@ fn disable_logging_works() { if std::env::var("RUN_TEST").is_ok() { sp_tracing::try_init_simple(); - let mut builder = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm); + let mut builder = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm); builder.genesis_init_mut().set_wasm_code( substrate_test_runtime_client::runtime::wasm_binary_logging_disabled_unwrap().to_vec(), ); diff --git a/substrate/primitives/application-crypto/src/ecdsa.rs b/substrate/primitives/application-crypto/src/ecdsa.rs index fe54dab39eef850db2bba6e633f42f46d6a9b531..915e16ba3b1a2aca9ae340b147b8a5dfa6f537ce 100644 --- a/substrate/primitives/application-crypto/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/src/ecdsa.rs @@ -17,7 +17,7 @@ //! Ecdsa crypto types. -use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -33,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/ed25519.rs b/substrate/primitives/application-crypto/src/ed25519.rs index 98eb4727df63ecb27f73c0b276dd737099700ab5..09ce48fcb274c983aa7c253a1d590313f3af35af 100644 --- a/substrate/primitives/application-crypto/src/ed25519.rs +++ b/substrate/primitives/application-crypto/src/ed25519.rs @@ -17,7 +17,7 @@ //! Ed25519 crypto types. -use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -33,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/lib.rs b/substrate/primitives/application-crypto/src/lib.rs index ca175ddbed915c53658cbc8c9ded77fbca12f6ed..95b8c1f11f80c9a61858c9f88dd6e3bccf9a4899 100644 --- a/substrate/primitives/application-crypto/src/lib.rs +++ b/substrate/primitives/application-crypto/src/lib.rs @@ -18,15 +18,18 @@ //! Traits and macros for constructing application specific strongly typed crypto wrappers. 
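The reordered `app::{Public, Signature, Pair}` re-exports in the ecdsa/ed25519 modules above all come from a single `app_crypto!` invocation inside each `mod app`; the macros reformatted below are what that call expands through. A sketch of the call site (the key type ID here is illustrative, not one used by Substrate):

mod app {
    use sp_application_crypto::{app_crypto, ed25519, KeyTypeId};

    // Generates wrapped `Public`, `Signature` and (with `full_crypto`) `Pair`
    // types tied to this KeyTypeId.
    app_crypto!(ed25519, KeyTypeId(*b"demo"));
}

pub use app::{Public as AppPublic, Signature as AppSignature};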
#![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] -#[doc(hidden)] -pub use sp_core::{self, crypto::{CryptoType, CryptoTypePublicPair, Public, Derive, IsWrappedBy, Wraps}, RuntimeDebug}; +pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; #[doc(hidden)] #[cfg(feature = "full_crypto")] -pub use sp_core::crypto::{SecretStringError, DeriveJunction, Ss58Codec, Pair}; -pub use sp_core::crypto::{KeyTypeId, CryptoTypeId, key_types}; +pub use sp_core::crypto::{DeriveJunction, Pair, SecretStringError, Ss58Codec}; +#[doc(hidden)] +pub use sp_core::{ + self, + crypto::{CryptoType, CryptoTypePublicPair, Derive, IsWrappedBy, Public, Wraps}, + RuntimeDebug, +}; #[doc(hidden)] pub use codec; @@ -34,15 +37,11 @@ pub use codec; #[cfg(feature = "std")] pub use serde; #[doc(hidden)] -pub use sp_std::{ - convert::TryFrom, - ops::Deref, - vec::Vec, -}; +pub use sp_std::{convert::TryFrom, ops::Deref, vec::Vec}; +pub mod ecdsa; pub mod ed25519; pub mod sr25519; -pub mod ecdsa; mod traits; pub use traits::*; @@ -51,7 +50,7 @@ pub use traits::*; /// Application-specific types whose identifier is `$key_type`. /// /// ```rust -///# use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; +/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; /// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. /// app_crypto!(ed25519, KeyTypeId(*b"_uba")); @@ -61,8 +60,17 @@ pub use traits::*; macro_rules! app_crypto { ($module:ident, $key_type:expr) => { $crate::app_crypto_public_full_crypto!($module::Public, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_signature_full_crypto!($module::Signature, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_public_common!( + $module::Public, + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); + $crate::app_crypto_signature_full_crypto!( + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); $crate::app_crypto_signature_common!($module::Signature, $key_type); $crate::app_crypto_pair!($module::Pair, $key_type, $module::CRYPTO_ID); }; @@ -72,7 +80,7 @@ macro_rules! app_crypto { /// Application-specific types whose identifier is `$key_type`. /// /// ```rust -///# use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; +/// # use sp_application_crypto::{app_crypto, wrap, ed25519, KeyTypeId}; /// // Declare a new set of crypto types using Ed25519 logic that identifies as `KeyTypeId` /// // of value `b"fuba"`. /// app_crypto!(ed25519, KeyTypeId(*b"_uba")); @@ -82,8 +90,17 @@ macro_rules! app_crypto { macro_rules! app_crypto { ($module:ident, $key_type:expr) => { $crate::app_crypto_public_not_full_crypto!($module::Public, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type, $module::CRYPTO_ID); - $crate::app_crypto_signature_not_full_crypto!($module::Signature, $key_type, $module::CRYPTO_ID); + $crate::app_crypto_public_common!( + $module::Public, + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); + $crate::app_crypto_signature_not_full_crypto!( + $module::Signature, + $key_type, + $module::CRYPTO_ID + ); $crate::app_crypto_signature_common!($module::Signature, $key_type); }; } @@ -93,7 +110,7 @@ macro_rules! app_crypto { #[macro_export] macro_rules! app_crypto_pair { ($pair:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap! 
{ /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. #[derive(Clone)] pub struct Pair($pair); } @@ -111,12 +128,16 @@ macro_rules! app_crypto_pair { $crate::app_crypto_pair_functions_if_std!($pair); - fn derive< - Iter: Iterator<Item = $crate::DeriveJunction> - >(&self, path: Iter, seed: Option<Self::Seed>) -> Result<(Self, Option<Self::Seed>), Self::DeriveError> { + fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>( + &self, + path: Iter, + seed: Option<Self::Seed>, + ) -> Result<(Self, Option<Self::Seed>), Self::DeriveError> { self.0.derive(path, seed).map(|x| (Self(x.0), x.1)) } - fn from_seed(seed: &Self::Seed) -> Self { Self(<$pair>::from_seed(seed)) } + fn from_seed(seed: &Self::Seed) -> Self { + Self(<$pair>::from_seed(seed)) + } fn from_seed_slice(seed: &[u8]) -> Result<Self, $crate::SecretStringError> { <$pair>::from_seed_slice(seed).map(Self) } @@ -137,8 +158,12 @@ macro_rules! app_crypto_pair { ) -> bool { <$pair>::verify_weak(sig, message, pubkey) } - fn public(&self) -> Self::Public { Public(self.0.public()) } - fn to_raw_vec(&self) -> $crate::Vec<u8> { self.0.to_raw_vec() } + fn public(&self) -> Self::Public { + Public(self.0.public()) + } + fn to_raw_vec(&self) -> $crate::Vec<u8> { + self.0.to_raw_vec() + } } impl $crate::AppKey for Pair { @@ -167,22 +192,22 @@ macro_rules! app_crypto_pair_functions_if_std { (Self(r.0), r.1, r.2) } - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, Self::Seed), $crate::SecretStringError> - { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) } - } + }; } #[doc(hidden)] #[cfg(not(feature = "std"))] #[macro_export] macro_rules! app_crypto_pair_functions_if_std { - ($pair:ty) => {} + ($pair:ty) => {}; } - /// Declares Public type which is functionally equivalent to `$public`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -191,7 +216,7 @@ macro_rules! app_crypto_pair_functions_if_std { #[macro_export] macro_rules! app_crypto_public_full_crypto { ($public:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( Clone, Default, Eq, Hash, PartialEq, PartialOrd, Ord, @@ -216,7 +241,7 @@ macro_rules! app_crypto_public_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -227,7 +252,7 @@ macro_rules! app_crypto_public_full_crypto { #[macro_export] macro_rules! app_crypto_public_not_full_crypto { ($public:ty, $key_type:expr, $crypto_type:expr) => { - $crate::wrap!{ + $crate::wrap! { /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. #[derive( Clone, Default, Eq, PartialEq, Ord, PartialOrd, @@ -247,7 +272,7 @@ macro_rules! app_crypto_public_not_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -260,15 +285,21 @@ macro_rules!
app_crypto_public_common { $crate::app_crypto_public_common_if_std!(); impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } } impl $crate::Public for Public { - fn from_slice(x: &[u8]) -> Self { Self(<$public>::from_slice(x)) } + fn from_slice(x: &[u8]) -> Self { + Self(<$public>::from_slice(x)) + } fn to_public_crypto_pair(&self) -> $crate::CryptoTypePublicPair { $crate::CryptoTypePublicPair($crypto_type, self.to_raw_vec()) @@ -279,14 +310,20 @@ macro_rules! app_crypto_public_common { type Generic = $public; } - impl $crate::RuntimeAppPublic for Public where $public: $crate::RuntimePublic { + impl $crate::RuntimeAppPublic for Public + where + $public: $crate::RuntimePublic, + { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; type Signature = Signature; fn all() -> $crate::Vec<Self> { - <$public as $crate::RuntimePublic>::all($key_type).into_iter().map(Self).collect() + <$public as $crate::RuntimePublic>::all($key_type) + .into_iter() + .map(Self) + .collect() } fn generate_pair(seed: Option<$crate::Vec<u8>>) -> Self { @@ -294,11 +331,8 @@ macro_rules! app_crypto_public_common { } fn sign<M: AsRef<[u8]>>(&self, msg: &M) -> Option<Self::Signature> { - <$public as $crate::RuntimePublic>::sign( - self.as_ref(), - $key_type, - msg, - ).map(Signature) + <$public as $crate::RuntimePublic>::sign(self.as_ref(), $key_type, msg) + .map(Signature) } fn verify<M: AsRef<[u8]>>(&self, msg: &M, signature: &Self::Signature) -> bool { @@ -318,10 +352,7 @@ macro_rules! app_crypto_public_common { impl From<&Public> for $crate::CryptoTypePublicPair { fn from(key: &Public) -> Self { - $crate::CryptoTypePublicPair( - $crypto_type, - $crate::Public::to_raw_vec(key), - ) + $crate::CryptoTypePublicPair($crypto_type, $crate::Public::to_raw_vec(key)) } } @@ -332,7 +363,7 @@ macro_rules! app_crypto_public_common { <$public>::try_from(data).map(Into::into) } } - } + }; } /// Implements traits for the public key type if `feature = "std"` is enabled. @@ -342,8 +373,9 @@ macro_rules! app_crypto_public_common { macro_rules! app_crypto_public_common_if_std { () => { impl $crate::Derive for Public { - fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>(&self, - path: Iter + fn derive<Iter: Iterator<Item = $crate::DeriveJunction>>( + &self, + path: Iter, ) -> Option<Self> { self.0.derive(path).map(Self) } @@ -357,8 +389,9 @@ macro_rules! app_crypto_public_common_if_std { } impl $crate::serde::Serialize for Public { - fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> where - S: $crate::serde::Serializer + fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> + where + S: $crate::serde::Serializer, { use $crate::Ss58Codec; serializer.serialize_str(&self.to_ss58check()) @@ -366,15 +399,16 @@ macro_rules! app_crypto_public_common_if_std { } impl<'de> $crate::serde::Deserialize<'de> for Public { - fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> where - D: $crate::serde::Deserializer<'de> + fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error> + where + D: $crate::serde::Deserializer<'de>, { use $crate::Ss58Codec; Public::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) } } - } + }; } #[cfg(not(feature = "std"))] @@ -383,10 +417,9 @@ macro_rules!
app_crypto_public_common_if_std { () => { impl $crate::Derive for Public {} - } + }; } - /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -418,7 +451,7 @@ macro_rules! app_crypto_signature_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -448,7 +481,7 @@ macro_rules! app_crypto_signature_not_full_crypto { const ID: $crate::KeyTypeId = $key_type; const CRYPTO_ID: $crate::CryptoTypeId = $crypto_type; } - } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -461,11 +494,15 @@ macro_rules! app_crypto_signature_common { impl $crate::Deref for Signature { type Target = [u8]; - fn deref(&self) -> &Self::Target { self.0.as_ref() } + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } } impl $crate::AppSignature for Signature { @@ -479,7 +516,7 @@ macro_rules! app_crypto_signature_common { Ok(<$sig>::try_from(data.as_slice())?.into()) } } - } + }; } /// Implement bidirectional `From` and on-way `AsRef`/`AsMut` for two types, `$inner` and `$outer`. @@ -547,10 +584,9 @@ macro_rules! with_pair { } } - #[doc(hidden)] #[macro_export] #[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] macro_rules! with_pair { - ( $( $def:tt )* ) => {} + ( $( $def:tt )* ) => {}; } diff --git a/substrate/primitives/application-crypto/src/sr25519.rs b/substrate/primitives/application-crypto/src/sr25519.rs index f3ce867858339c21fca76ec906b99a30eef91f75..f51236f2ab3847611c2add2f35864056bf5ec096 100644 --- a/substrate/primitives/application-crypto/src/sr25519.rs +++ b/substrate/primitives/application-crypto/src/sr25519.rs @@ -17,7 +17,7 @@ //! Sr25519 crypto types. -use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; @@ -33,9 +33,9 @@ mod app { } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { type Signature = Signature; diff --git a/substrate/primitives/application-crypto/src/traits.rs b/substrate/primitives/application-crypto/src/traits.rs index 8daa866af63ed879b8881b3b5f417611ebb82ec3..2f7fd139c018742c666310bd35143200adafc5c8 100644 --- a/substrate/primitives/application-crypto/src/traits.rs +++ b/substrate/primitives/application-crypto/src/traits.rs @@ -19,7 +19,7 @@ use sp_core::crypto::Pair; use codec::Codec; -use sp_core::crypto::{KeyTypeId, CryptoType, CryptoTypeId, IsWrappedBy, Public}; +use sp_core::crypto::{CryptoType, CryptoTypeId, IsWrappedBy, KeyTypeId, Public}; use sp_std::{fmt::Debug, vec::Vec}; /// An application-specific key. @@ -57,7 +57,7 @@ impl MaybeHash for T {} /// Type which implements Debug and Hash in std, not when no-std (no-std variant with crypto). 
#[cfg(all(not(feature = "std"), feature = "full_crypto"))] -pub trait MaybeDebugHash: sp_std::hash::Hash {} +pub trait MaybeDebugHash: sp_std::hash::Hash {} #[cfg(all(not(feature = "std"), feature = "full_crypto"))] impl MaybeDebugHash for T {} @@ -66,15 +66,23 @@ pub trait AppPublic: AppKey + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec { /// The wrapped type which is just a plain instance of `Public`. - type Generic: - IsWrappedBy + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec; + type Generic: IsWrappedBy + + Public + + Ord + + PartialOrd + + Eq + + PartialEq + + Debug + + MaybeHash + + codec::Codec; } /// A application's key pair. #[cfg(feature = "full_crypto")] -pub trait AppPair: AppKey + Pair::Public> { +pub trait AppPair: AppKey + Pair::Public> { /// The wrapped type which is just a plain instance of `Pair`. - type Generic: IsWrappedBy + Pair::Public as AppPublic>::Generic>; + type Generic: IsWrappedBy + + Pair::Public as AppPublic>::Generic>; } /// A application's signature. diff --git a/substrate/primitives/application-crypto/test/src/ecdsa.rs b/substrate/primitives/application-crypto/test/src/ecdsa.rs index 5ad10e79ef96ff13c78bf450215ac04a03279f77..c4aa6a2afbd614e157e3ee0e987bf3c3ce88f299 100644 --- a/substrate/primitives/application-crypto/test/src/ecdsa.rs +++ b/substrate/primitives/application-crypto/test/src/ecdsa.rs @@ -16,28 +16,22 @@ // limitations under the License. //! Integration tests for ecdsa -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::ecdsa::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::ECDSA}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::ECDSA, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ecdsa::{AppPair, AppPublic}; #[test] fn ecdsa_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_ecdsa_crypto(&BlockId::Number(0)) .expect("Tests `ecdsa` crypto."); diff --git a/substrate/primitives/application-crypto/test/src/ed25519.rs b/substrate/primitives/application-crypto/test/src/ed25519.rs index 06b962f1902bcfc129c0b5b4659ca9465e154e54..7cfd801388c788012c69b666706978170e18ea46 100644 --- a/substrate/primitives/application-crypto/test/src/ed25519.rs +++ b/substrate/primitives/application-crypto/test/src/ed25519.rs @@ -17,28 +17,22 @@ //! 
Integration tests for ed25519 -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::ed25519::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::ED25519}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::ED25519, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ed25519::{AppPair, AppPublic}; #[test] fn ed25519_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_ed25519_crypto(&BlockId::Number(0)) .expect("Tests `ed25519` crypto."); diff --git a/substrate/primitives/application-crypto/test/src/lib.rs b/substrate/primitives/application-crypto/test/src/lib.rs index bee926f8dd8c1ab56fe5c3d703ac3a3576141e1a..6b7734764e793bbfa03c2fc8741eac6723821ddd 100644 --- a/substrate/primitives/application-crypto/test/src/lib.rs +++ b/substrate/primitives/application-crypto/test/src/lib.rs @@ -17,9 +17,9 @@ //! Integration tests for application crypto +#[cfg(test)] +mod ecdsa; #[cfg(test)] mod ed25519; #[cfg(test)] mod sr25519; -#[cfg(test)] -mod ecdsa; diff --git a/substrate/primitives/application-crypto/test/src/sr25519.rs b/substrate/primitives/application-crypto/test/src/sr25519.rs index 889f662b68140b66ba0934cb2d57c37f72409540..12dfbc609fb017832b89f66ee6f4640b2a1522ed 100644 --- a/substrate/primitives/application-crypto/test/src/sr25519.rs +++ b/substrate/primitives/application-crypto/test/src/sr25519.rs @@ -17,28 +17,22 @@ //! Integration tests for sr25519 -use std::sync::Arc; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::sr25519::{AppPair, AppPublic}; +use sp_core::{crypto::Pair, testing::SR25519}; +use sp_keystore::{testing::KeyStore, SyncCryptoStore}; use sp_runtime::generic::BlockId; -use sp_core::{ - crypto::Pair, - testing::SR25519, -}; -use sp_keystore::{ - SyncCryptoStore, - testing::KeyStore, -}; +use std::sync::Arc; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::sr25519::{AppPair, AppPublic}; #[test] fn sr25519_works_in_runtime() { let keystore = Arc::new(KeyStore::new()); let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() + let (signature, public) = test_client + .runtime_api() .test_sr25519_crypto(&BlockId::Number(0)) .expect("Tests `sr25519` crypto."); diff --git a/substrate/primitives/arithmetic/benches/bench.rs b/substrate/primitives/arithmetic/benches/bench.rs index fd535c1d2d0ff25e85e3067e923e4e29e2bdb389..02db00aa0bf82d5db4839d867cf24d0c332c1354 100644 --- a/substrate/primitives/arithmetic/benches/bench.rs +++ b/substrate/primitives/arithmetic/benches/bench.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
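The next hunk only reorders imports and reformats the `criterion_group!` invocation in the arithmetic benchmarks. For reference, a self-contained criterion harness with the same post-rustfmt shape (criterion 0.3 API; the workload here is a trivial stand-in, not the file's actual `BigUint` benchmarks):

```rust
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};

// Trivial stand-in workload; bench.rs itself benchmarks BigUint arithmetic.
fn bench_addition(c: &mut Criterion) {
	let mut group = c.benchmark_group("addition");
	for size in [2usize, 4, 6, 8, 10] {
		group.throughput(Throughput::Elements(size as u64));
		group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &n| {
			b.iter(|| (0..n as u64).fold(0u64, |acc, x| acc.wrapping_add(x)))
		});
	}
	group.finish();
}

// Note the brace placement: rustfmt turns `criterion_group!{` into
// `criterion_group! {`, which is the whole change to this block below.
criterion_group! {
	name = benches;
	config = Criterion::default();
	targets = bench_addition
}
criterion_main!(benches);
```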
-use criterion::{Criterion, Throughput, BenchmarkId, criterion_group, criterion_main}; -use sp_arithmetic::biguint::{BigUint, Single}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; use rand::Rng; +use sp_arithmetic::biguint::{BigUint, Single}; fn random_big_uint(size: usize) -> BigUint { let mut rng = rand::thread_rng(); @@ -73,7 +73,7 @@ fn bench_division(c: &mut Criterion) { } } -criterion_group!{ +criterion_group! { name = benches; config = Criterion::default(); targets = bench_addition, bench_subtraction, bench_multiplication, bench_division diff --git a/substrate/primitives/arithmetic/fuzzer/src/biguint.rs b/substrate/primitives/arithmetic/fuzzer/src/biguint.rs index 57be7f5342043c9816495d86c761dbd7fe428e48..ca5b8379afff509249221ee7de29239ef487f6c8 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/biguint.rs @@ -60,8 +60,13 @@ fn main() { let expected = ue.unwrap() + ve.unwrap(); let t = u.clone().add(&v); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} + {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} + {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } @@ -74,8 +79,13 @@ fn main() { let t = t.unwrap(); let expected = expected.unwrap(); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} - {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} - {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } } @@ -84,31 +94,51 @@ fn main() { let expected = ue.unwrap() * ve.unwrap(); let t = u.clone().mul(&v); assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} * {:?} ===> {:?} != {:?}", u, v, t, expected, + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} * {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, ); } if check_digit_lengths(&u, &v, 4) { let (ue, ve) = (ue.unwrap(), ve.unwrap()); if ve == 0 { - return; + return } let (q, r) = (ue / ve, ue % ve); if let Some((qq, rr)) = u.clone().div(&v, true) { assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "{:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, + u128::try_from(qq.clone()).unwrap(), + q, + "{:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, ); assert_eq!( - u128::try_from(rr.clone()).unwrap(), r, - "{:?} % {:?} ===> {:?} != {:?}", u, v, rr, r, + u128::try_from(rr.clone()).unwrap(), + r, + "{:?} % {:?} ===> {:?} != {:?}", + u, + v, + rr, + r, ); } else if v.len() == 1 { let qq = u.clone().div_unit(ve as Single); assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "[single] {:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, + u128::try_from(qq.clone()).unwrap(), + q, + "[single] {:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, ); } else if v.msb() != 0 && u.msb() != 0 && u.len() > v.len() { panic!("div returned none for an unexpected reason"); @@ -175,7 +205,7 @@ fn assert_biguints_eq(a: &BigUint, b: &num_bigint::BigUint) { // `num_bigint::BigUint` doesn't expose it's internals, so we need to convert into that to // compare. - let limbs = (0 .. 
a.len()).map(|i| a.get(i)).collect(); + let limbs = (0..a.len()).map(|i| a.get(i)).collect(); let num_a = num_bigint::BigUint::new(limbs); assert!(&num_a == b, "\narithmetic: {:?}\nnum-bigint: {:?}", a, b); diff --git a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs index db415ecb84c7567b4e1bceb2775db836d3502289..d8f058ae51e2c0289a3a35df83ad068f5f13bf06 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/fixed_point.rs @@ -28,7 +28,7 @@ //! [here](https://docs.rs/honggfuzz/). use honggfuzz::fuzz; -use sp_arithmetic::{FixedPointNumber, FixedI64, traits::Saturating}; +use sp_arithmetic::{traits::Saturating, FixedI64, FixedPointNumber}; fn main() { loop { @@ -38,7 +38,8 @@ fn main() { // Check `from_rational` and division are consistent. if y != 0 { - let f1 = FixedI64::saturating_from_integer(x) / FixedI64::saturating_from_integer(y); + let f1 = + FixedI64::saturating_from_integer(x) / FixedI64::saturating_from_integer(y); let f2 = FixedI64::saturating_from_rational(x, y); assert_eq!(f1.into_inner(), f2.into_inner()); } @@ -75,7 +76,8 @@ fn main() { let a = FixedI64::saturating_from_rational(2, 5); let b = a.saturating_mul_acc_int(x); let xx = FixedI64::saturating_from_integer(x); - let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / FixedI64::accuracy() as i128; + let d = a.saturating_mul(xx).saturating_add(xx).into_inner() as i128 / + FixedI64::accuracy() as i128; assert_eq!(b, d); }); } diff --git a/substrate/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs b/substrate/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs index a1689716b56c6a6c690633dd7d3cb64460d2551a..d829a93ad4bb21c9c1f3ca2106a1d54829ce7871 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/multiply_by_rational.rs @@ -60,7 +60,7 @@ fn main() { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; if a.is_zero() { - return Zero::zero(); + return Zero::zero() } let c = c.max(1); diff --git a/substrate/primitives/arithmetic/fuzzer/src/normalize.rs b/substrate/primitives/arithmetic/fuzzer/src/normalize.rs index 48d52ba71bab628f7bab3363141f51fc4b0cbbcb..7f9f8cb3c79e074034fde9e44498dbff4fbcc78d 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/normalize.rs @@ -15,7 +15,6 @@ // See the License for the specific language governing permissions and // limitations under the License. - //! # Running //! Running this fuzzer can be done with `cargo hfuzz run normalize`. `honggfuzz` CLI options can //! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. @@ -37,7 +36,9 @@ fn main() { loop { fuzz!(|data: (Vec, Ty)| { let (data, norm) = data; - if data.len() == 0 { return; } + if data.len() == 0 { + return + } let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); let normalized = data.normalize(norm); @@ -50,13 +51,7 @@ fn main() { let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); // if this function returns Ok(), then it will ALWAYS be accurate. 
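To make the fuzzed invariant concrete: `normalize` returns a vector of the same length whose elements sum exactly to the target, bumping minima (or shrinking maxima) to close the gap. A deliberately naive reference model of that contract, useful as a mental model for the asserts that follow (one unit per iteration; the real sp-arithmetic implementation distributes the difference in O(n) with overflow checks):

```rust
// Naive reference model: adjust one unit at a time until the sum matches.
fn normalize_naive(mut xs: Vec<u32>, target: u128) -> Vec<u32> {
	loop {
		let sum: u128 = xs.iter().map(|x| *x as u128).sum();
		if sum == target {
			return xs
		}
		if sum < target {
			// Short of the target: always bump the current minimum.
			let i = (0..xs.len()).min_by_key(|&i| xs[i]).expect("non-empty input");
			xs[i] += 1;
		} else {
			// Over the target: always shrink the current maximum.
			let i = (0..xs.len()).max_by_key(|&i| xs[i]).expect("non-empty input");
			xs[i] -= 1;
		}
	}
}

fn main() {
	let out = normalize_naive(vec![8, 9, 7, 10], 40);
	assert_eq!(out.iter().map(|x| *x as u128).sum::<u128>(), 40);
}
```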
- assert_eq!( - sum, - norm as u128, - "sums don't match {:?}, {}", - normalized, - norm, - ); + assert_eq!(sum, norm as u128, "sums don't match {:?}, {}", normalized, norm,); } else { panic!("Should have returned Ok for input = {:?}, target = {:?}", data, norm); } diff --git a/substrate/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/substrate/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index 47ba5a4803056448d9e5c7b4e74eb9b8e533b743..c7f6a14c5f79c68e71611aad1682e4e5041cfef9 100644 --- a/substrate/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/substrate/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -24,16 +24,11 @@ //! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/per_thing_rational/*.fuzz`. use honggfuzz::fuzz; -use sp_arithmetic::{ - PerThing, PerU16, Percent, Perbill, Perquintill, traits::SaturatedConversion, -}; +use sp_arithmetic::{traits::SaturatedConversion, PerThing, PerU16, Perbill, Percent, Perquintill}; fn main() { loop { - fuzz!(| - data: ((u16, u16), (u32, u32), (u64, u64)) - | { - + fuzz!(|data: ((u16, u16), (u32, u32), (u64, u64))| { let (u16_pair, u32_pair, u64_pair) = data; // peru16 @@ -109,7 +104,6 @@ fn main() { Perquintill::from_float(smaller as f64 / bigger.max(1) as f64), 1000, ); - }) } } diff --git a/substrate/primitives/arithmetic/src/biguint.rs b/substrate/primitives/arithmetic/src/biguint.rs index 859cf829246f1e9cddd7298d387466cc3e3da398..2360151dafad972d31f2b92cbcc37e00c1ea0f0c 100644 --- a/substrate/primitives/arithmetic/src/biguint.rs +++ b/substrate/primitives/arithmetic/src/biguint.rs @@ -7,7 +7,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,9 +17,9 @@ //! Infinite precision unsigned integer for substrate runtime. -use num_traits::{Zero, One}; -use sp_std::{cmp::Ordering, ops, prelude::*, vec, cell::RefCell, convert::TryFrom}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use num_traits::{One, Zero}; +use sp_std::{cell::RefCell, cmp::Ordering, convert::TryFrom, ops, prelude::*, vec}; // A sensible value for this would be half of the dword size of the host machine. Since the // runtime is compiled to 32bit webassembly, using 32 and 64 for single and double respectively @@ -105,7 +105,9 @@ impl BigUint { } /// Number of limbs. - pub fn len(&self) -> usize { self.digits.len() } + pub fn len(&self) -> usize { + self.digits.len() + } /// A naive getter for limb at `index`. Note that the order is lsb -> msb. /// @@ -156,7 +158,9 @@ impl BigUint { // by definition, a big-int number should never have leading zero limbs. This function // has the ability to cause this. There is nothing to do if the number already has 1 // limb only. call it a day and return. - if self.len().is_zero() { return; } + if self.len().is_zero() { + return + } let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(self.len() - 1); if index > 0 { @@ -168,7 +172,9 @@ impl BigUint { /// is already bigger than `size` limbs. 
pub fn lpad(&mut self, size: usize) { let n = self.len(); - if n >= size { return; } + if n >= size { + return + } let pad = size - n; let mut new_digits = (0..pad).map(|_| 0).collect::>(); new_digits.extend(self.digits.iter()); @@ -260,15 +266,15 @@ impl BigUint { if self.get(j) == 0 { // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if // otherwise. - continue; + continue } let mut k = 0; for i in 0..m { // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = mul_single(self.get(j), other.get(i)) - + Double::from(w.get(i + j)) - + Double::from(k); + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); w.set(i + j, (t % B) as Single); // PROOF: (B^2 - 1) / B < B. conversion is safe. k = (t / B) as Single; @@ -288,9 +294,9 @@ impl BigUint { let mut out = Self::with_capacity(n); let mut r: Single = 0; // PROOF: (B-1) * B + (B-1) still fits in double - let with_r = |x: Single, r: Single| { Double::from(r) * B + Double::from(x) }; + let with_r = |x: Single, r: Single| Double::from(r) * B + Double::from(x); for d in (0..n).rev() { - let (q, rr) = div_single(with_r(self.get(d), r), other) ; + let (q, rr) = div_single(with_r(self.get(d), r), other); out.set(d, q as Single); r = rr; } @@ -311,11 +317,7 @@ impl BigUint { /// /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { - if other.len() <= 1 - || other.msb() == 0 - || self.msb() == 0 - || self.len() <= other.len() - { + if other.len() <= 1 || other.msb() == 0 || self.msb() == 0 || self.len() <= other.len() { return None } let n = other.len(); @@ -344,9 +346,7 @@ impl BigUint { // PROOF: this always fits into `Double`. In the context of Single = u8, and // Double = u16, think of 255 * 256 + 255 which is just u16::MAX. let dividend = - Double::from(self_norm.get(j + n)) - * B - + Double::from(self_norm.get(j + n - 1)); + Double::from(self_norm.get(j + n)) * B + Double::from(self_norm.get(j + n - 1)); let divisor = other_norm.get(n - 1); div_single(dividend, divisor) }; @@ -377,23 +377,30 @@ impl BigUint { test(); while (*rhat.borrow() as Double) < B { - if !test() { break; } + if !test() { + break + } } let qhat = qhat.into_inner(); // we don't need rhat anymore. just let it go out of scope when it does. // step D4 - let lhs = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; + let lhs = Self { digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect() }; let rhs = other_norm.clone().mul(&Self::from(qhat)); let maybe_sub = lhs.sub(&rhs); let mut negative = false; let sub = match maybe_sub { Ok(t) => t, - Err(t) => { negative = true; t } + Err(t) => { + negative = true; + t + }, }; - (j..=j+n).for_each(|d| { self_norm.set(d, sub.get(d - j)); }); + (j..=j + n).for_each(|d| { + self_norm.set(d, sub.get(d - j)); + }); // step D5 // PROOF: the `test()` specifically decreases qhat until it is below `B`. conversion @@ -403,9 +410,11 @@ impl BigUint { // step D6: add back if negative happened. 
if negative { q.set(j, q.get(j) - 1); - let u = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; + let u = Self { digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect() }; let r = other_norm.clone().add(&u); - (j..=j+n).rev().for_each(|d| { self_norm.set(d, r.get(d - j)); }) + (j..=j + n).rev().for_each(|d| { + self_norm.set(d, r.get(d - j)); + }) } } @@ -415,9 +424,8 @@ impl BigUint { if normalizer_bits > 0 { let s = SHIFT as u32; let nb = normalizer_bits; - for d in 0..n-1 { - let v = self_norm.get(d) >> nb - | self_norm.get(d + 1).overflowing_shl(s - nb).0; + for d in 0..n - 1 { + let v = self_norm.get(d) >> nb | self_norm.get(d + 1).overflowing_shl(s - nb).0; r.set(d, v); } r.set(n - 1, self_norm.get(n - 1) >> normalizer_bits); @@ -445,7 +453,6 @@ impl sp_std::fmt::Debug for BigUint { fn fmt(&self, _: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { Ok(()) } - } impl PartialEq for BigUint { @@ -475,7 +482,7 @@ impl Ord for BigUint { Ordering::Equal => lhs.cmp(rhs), _ => len_cmp, } - } + }, } } } @@ -632,18 +639,9 @@ pub mod tests { #[test] fn equality_works() { - assert_eq!( - BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); - assert_eq!( - BigUint { digits: vec![3, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - false, - ); - assert_eq!( - BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); + assert_eq!(BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, true,); + assert_eq!(BigUint { digits: vec![3, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, false,); + assert_eq!(BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, true,); } #[test] @@ -669,14 +667,8 @@ pub mod tests { use sp_std::convert::TryFrom; assert_eq!(u64::try_from(with_limbs(1)).unwrap(), 1); assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::MAX as u64 + 2); - assert_eq!( - u64::try_from(with_limbs(3)).unwrap_err(), - "cannot fit a number into u64", - ); - assert_eq!( - u128::try_from(with_limbs(3)).unwrap(), - u32::MAX as u128 + u64::MAX as u128 + 3 - ); + assert_eq!(u64::try_from(with_limbs(3)).unwrap_err(), "cannot fit a number into u64",); + assert_eq!(u128::try_from(with_limbs(3)).unwrap(), u32::MAX as u128 + u64::MAX as u128 + 3); } #[test] diff --git a/substrate/primitives/arithmetic/src/fixed_point.rs b/substrate/primitives/arithmetic/src/fixed_point.rs index 9c5078ca66f093880d1fb0123e9256e0736ed985..1515573b46742fa4069c196bcea52c9a901bd584 100644 --- a/substrate/primitives/arithmetic/src/fixed_point.rs +++ b/substrate/primitives/arithmetic/src/fixed_point.rs @@ -17,22 +17,38 @@ //! Decimal Fixed Point implementations for Substrate runtime. 
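As a compact reminder of what the fixed-point types reformatted below actually compute: `$name` stores `value * DIV` in its inner integer, and `checked_from_rational(n, d)` is `n * DIV / d` carried out in a wider integer so the intermediate product cannot overflow. A toy sketch with 10^9 accuracy, analogous to `FixedI64` (illustrative only; the real type goes through `multiply_by_rational` and handles signs via `I129`):

```rust
use std::convert::TryFrom;

/// Toy decimal fixed point with 10^9 accuracy, mirroring FixedI64's layout.
#[derive(Debug, PartialEq)]
struct Fixed(i64);

impl Fixed {
	const DIV: i64 = 1_000_000_000;

	/// `n / d` as a fixed point value; `None` on division by zero or overflow.
	fn checked_from_rational(n: i64, d: i64) -> Option<Fixed> {
		if d == 0 {
			return None
		}
		// Widen to i128 so `n * DIV` cannot overflow before the division.
		let inner = (n as i128).checked_mul(Self::DIV as i128)? / d as i128;
		i64::try_from(inner).ok().map(Fixed)
	}
}

fn main() {
	// 5/2 = 2.5, stored as 2_500_000_000 inner units.
	assert_eq!(Fixed::checked_from_rational(5, 2), Some(Fixed(2_500_000_000)));
	assert_eq!(Fixed::checked_from_rational(1, 0), None);
}
```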
-use sp_std::{ops::{self, Add, Sub, Mul, Div}, fmt::Debug, prelude::*, convert::{TryInto, TryFrom}}; -use codec::{Encode, Decode, CompactAs}; use crate::{ - helpers_128bit::multiply_by_rational, PerThing, + helpers_128bit::multiply_by_rational, traits::{ - SaturatedConversion, CheckedSub, CheckedAdd, CheckedMul, CheckedDiv, CheckedNeg, - Bounded, Saturating, UniqueSaturatedInto, Zero, One + Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedSub, One, + SaturatedConversion, Saturating, UniqueSaturatedInto, Zero, }, + PerThing, +}; +use codec::{CompactAs, Decode, Encode}; +use sp_std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + ops::{self, Add, Div, Mul, Sub}, + prelude::*, }; #[cfg(feature = "std")] use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Integer types that can be used to interact with `FixedPointNumber` implementations. -pub trait FixedPointOperand: Copy + Clone + Bounded + Zero + Saturating - + PartialOrd + UniqueSaturatedInto + TryFrom + CheckedNeg {} +pub trait FixedPointOperand: + Copy + + Clone + + Bounded + + Zero + + Saturating + + PartialOrd + + UniqueSaturatedInto + + TryFrom + + CheckedNeg +{ +} impl FixedPointOperand for i128 {} impl FixedPointOperand for u128 {} @@ -53,11 +69,26 @@ impl FixedPointOperand for u8 {} /// to `Self::Inner::max_value() / Self::DIV`. /// This is also referred to as the _accuracy_ of the type in the documentation. pub trait FixedPointNumber: - Sized + Copy + Default + Debug - + Saturating + Bounded - + Eq + PartialEq + Ord + PartialOrd - + CheckedSub + CheckedAdd + CheckedMul + CheckedDiv - + Add + Sub + Div + Mul + Zero + One + Sized + + Copy + + Default + + Debug + + Saturating + + Bounded + + Eq + + PartialEq + + Ord + + PartialOrd + + CheckedSub + + CheckedAdd + + CheckedMul + + CheckedDiv + + Add + + Sub + + Div + + Mul + + Zero + + One { /// The underlying data type used for this fixed point number. type Inner: Debug + One + CheckedMul + CheckedDiv + FixedPointOperand; @@ -108,7 +139,10 @@ pub trait FixedPointNumber: /// Creates `self` from a rational number. Equal to `n / d`. /// /// Returns `None` if `d == 0` or `n / d` exceeds accuracy. - fn checked_from_rational(n: N, d: D) -> Option { + fn checked_from_rational( + n: N, + d: D, + ) -> Option { if d == D::zero() { return None } @@ -117,7 +151,8 @@ pub trait FixedPointNumber: let d: I129 = d.into(); let negative = n.negative != d.negative; - multiply_by_rational(n.value, Self::DIV.unique_saturated_into(), d.value).ok() + multiply_by_rational(n.value, Self::DIV.unique_saturated_into(), d.value) + .ok() .and_then(|value| from_i129(I129 { value, negative })) .map(Self::from_inner) } @@ -130,7 +165,8 @@ pub trait FixedPointNumber: let rhs: I129 = n.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, rhs.value, Self::DIV.unique_saturated_into()).ok() + multiply_by_rational(lhs.value, rhs.value, Self::DIV.unique_saturated_into()) + .ok() .and_then(|value| from_i129(I129 { value, negative })) } @@ -149,7 +185,8 @@ pub trait FixedPointNumber: let rhs: I129 = d.into(); let negative = lhs.negative != rhs.negative; - lhs.value.checked_div(rhs.value) + lhs.value + .checked_div(rhs.value) .and_then(|n| n.checked_div(Self::DIV.unique_saturated_into())) .and_then(|value| from_i129(I129 { value, negative })) } @@ -212,7 +249,8 @@ pub trait FixedPointNumber: /// Returns the integer part. 
fn trunc(self) -> Self { - self.into_inner().checked_div(&Self::DIV) + self.into_inner() + .checked_div(&Self::DIV) .expect("panics only if DIV is zero, DIV is not zero; qed") .checked_mul(&Self::DIV) .map(Self::from_inner) @@ -281,7 +319,8 @@ struct I129 { impl From for I129 { fn from(n: N) -> I129 { if n < N::zero() { - let value: u128 = n.checked_neg() + let value: u128 = n + .checked_neg() .map(|n| n.unique_saturated_into()) .unwrap_or_else(|| N::max_value().unique_saturated_into().saturating_add(1)); I129 { value, negative: true } @@ -322,9 +361,10 @@ macro_rules! implement_fixed { $title:expr $(,)? ) => { /// A fixed point number representation in the range. - /// #[doc = $title] - #[derive(Encode, Decode, CompactAs, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] + #[derive( + Encode, Decode, CompactAs, Default, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, + )] pub struct $name($inner_type); impl From<$inner_type> for $name { @@ -386,7 +426,7 @@ macro_rules! implement_fixed { fn saturating_pow(self, exp: usize) -> Self { if exp == 0 { - return Self::saturating_from_integer(1); + return Self::saturating_from_integer(1) } let exp = exp as u32; @@ -471,7 +511,8 @@ macro_rules! implement_fixed { let rhs: I129 = other.0.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, Self::DIV as u128, rhs.value).ok() + multiply_by_rational(lhs.value, Self::DIV as u128, rhs.value) + .ok() .and_then(|value| from_i129(I129 { value, negative })) .map(Self) } @@ -483,7 +524,8 @@ macro_rules! implement_fixed { let rhs: I129 = other.0.into(); let negative = lhs.negative != rhs.negative; - multiply_by_rational(lhs.value, rhs.value, Self::DIV as u128).ok() + multiply_by_rational(lhs.value, rhs.value, Self::DIV as u128) + .ok() .and_then(|value| from_i129(I129 { value, negative })) .map(Self) } @@ -524,7 +566,11 @@ macro_rules! implement_fixed { format!("{}{}", signum_for_zero, int) }; let precision = (Self::accuracy() as f64).log10() as usize; - let fractional = format!("{:0>weight$}", ((self.0 % Self::accuracy()) as i128).abs(), weight=precision); + let fractional = format!( + "{:0>weight$}", + ((self.0 % Self::accuracy()) as i128).abs(), + weight = precision + ); write!(f, "{}({}.{})", stringify!($name), integral, fractional) } @@ -534,7 +580,10 @@ macro_rules! implement_fixed { } } - impl From

<P> for $name where P::Inner: FixedPointOperand { + impl<P: PerThing> From<P>
for $name + where + P::Inner: FixedPointOperand, + { fn from(p: P) -> Self { let accuracy = P::ACCURACY; let value = p.deconstruct(); @@ -554,8 +603,8 @@ macro_rules! implement_fixed { type Err = &'static str; fn from_str(s: &str) -> Result { - let inner: ::Inner = s.parse() - .map_err(|_| "invalid string input for fixed point number")?; + let inner: ::Inner = + s.parse().map_err(|_| "invalid string input for fixed point number")?; Ok(Self::from_inner(inner)) } } @@ -610,50 +659,32 @@ macro_rules! implement_fixed { #[test] fn from_i129_works() { - let a = I129 { - value: 1, - negative: true, - }; + let a = I129 { value: 1, negative: true }; // Can't convert negative number to unsigned. assert_eq!(from_i129::(a), None); - let a = I129 { - value: u128::MAX - 1, - negative: false, - }; + let a = I129 { value: u128::MAX - 1, negative: false }; // Max - 1 value fits. assert_eq!(from_i129::(a), Some(u128::MAX - 1)); - let a = I129 { - value: u128::MAX, - negative: false, - }; + let a = I129 { value: u128::MAX, negative: false }; // Max value fits. assert_eq!(from_i129::(a), Some(u128::MAX)); - let a = I129 { - value: i128::MAX as u128 + 1, - negative: true, - }; + let a = I129 { value: i128::MAX as u128 + 1, negative: true }; // Min value fits. assert_eq!(from_i129::(a), Some(i128::MIN)); - let a = I129 { - value: i128::MAX as u128 + 1, - negative: false, - }; + let a = I129 { value: i128::MAX as u128 + 1, negative: false }; // Max + 1 does not fit. assert_eq!(from_i129::(a), None); - let a = I129 { - value: i128::MAX as u128, - negative: false, - }; + let a = I129 { value: i128::MAX as u128, negative: false }; // Max value fits. assert_eq!(from_i129::(a), Some(i128::MAX)); @@ -724,7 +755,6 @@ macro_rules! implement_fixed { // Min. assert_eq!($name::max_value(), b); - } } @@ -849,8 +879,7 @@ macro_rules! implement_fixed { let accuracy = $name::accuracy(); // Case where integer fits. - let a = $name::checked_from_integer(42) - .expect("42 * accuracy <= inner_max; qed"); + let a = $name::checked_from_integer(42).expect("42 * accuracy <= inner_max; qed"); assert_eq!(a.into_inner(), 42 * accuracy); // Max integer that fit. @@ -928,7 +957,7 @@ macro_rules! implement_fixed { if $name::SIGNED { // Negative case: -2.5 let a = $name::saturating_from_rational(-5, 2); - assert_eq!(a.into_inner(), 0 - 25 * accuracy / 10); + assert_eq!(a.into_inner(), 0 - 25 * accuracy / 10); // Other negative case: -2.5 let a = $name::saturating_from_rational(5, -2); @@ -1048,7 +1077,10 @@ macro_rules! implement_fixed { if $name::SIGNED { // Min - 1 => Underflow => None. - let a = $name::checked_from_rational(inner_max as u128 + 2, 0.saturating_sub(accuracy)); + let a = $name::checked_from_rational( + inner_max as u128 + 2, + 0.saturating_sub(accuracy), + ); assert_eq!(a, None); let a = $name::checked_from_rational(inner_max, 0 - 3 * accuracy).unwrap(); @@ -1163,15 +1195,15 @@ macro_rules! implement_fixed { // Max - 1. let b = $name::from_inner(inner_max - 1); - assert_eq!(a.checked_mul(&(b/2.into())), Some(b)); + assert_eq!(a.checked_mul(&(b / 2.into())), Some(b)); // Max. let c = $name::from_inner(inner_max); - assert_eq!(a.checked_mul(&(c/2.into())), Some(b)); + assert_eq!(a.checked_mul(&(c / 2.into())), Some(b)); // Max + 1 => None. let e = $name::from_inner(1); - assert_eq!(a.checked_mul(&(c/2.into()+e)), None); + assert_eq!(a.checked_mul(&(c / 2.into() + e)), None); if $name::SIGNED { // Min + 1. @@ -1192,8 +1224,14 @@ macro_rules! 
implement_fixed { let b = $name::saturating_from_rational(1, -2); assert_eq!(b.checked_mul(&42.into()), Some(0.saturating_sub(21).into())); - assert_eq!(b.checked_mul(&$name::max_value()), $name::max_value().checked_div(&0.saturating_sub(2).into())); - assert_eq!(b.checked_mul(&$name::min_value()), $name::min_value().checked_div(&0.saturating_sub(2).into())); + assert_eq!( + b.checked_mul(&$name::max_value()), + $name::max_value().checked_div(&0.saturating_sub(2).into()) + ); + assert_eq!( + b.checked_mul(&$name::min_value()), + $name::min_value().checked_div(&0.saturating_sub(2).into()) + ); assert_eq!(c.checked_mul(&$name::min_value()), None); } @@ -1203,8 +1241,14 @@ macro_rules! implement_fixed { assert_eq!(a.checked_mul(&42.into()), Some(21.into())); assert_eq!(c.checked_mul(&2.into()), Some(510.into())); assert_eq!(c.checked_mul(&$name::max_value()), None); - assert_eq!(a.checked_mul(&$name::max_value()), $name::max_value().checked_div(&2.into())); - assert_eq!(a.checked_mul(&$name::min_value()), $name::min_value().checked_div(&2.into())); + assert_eq!( + a.checked_mul(&$name::max_value()), + $name::max_value().checked_div(&2.into()) + ); + assert_eq!( + a.checked_mul(&$name::min_value()), + $name::min_value().checked_div(&2.into()) + ); } #[test] @@ -1230,13 +1274,25 @@ macro_rules! implement_fixed { if b < c { // Not executed by unsigned inners. - assert_eq!(a.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_max / (2 * accuracy)))); - assert_eq!(a.checked_div_int(0.saturating_sub(inner_max / accuracy)), Some(0.saturating_sub(1))); + assert_eq!( + a.checked_div_int(0.saturating_sub(2)), + Some(0.saturating_sub(inner_max / (2 * accuracy))) + ); + assert_eq!( + a.checked_div_int(0.saturating_sub(inner_max / accuracy)), + Some(0.saturating_sub(1)) + ); assert_eq!(b.checked_div_int(i128::MIN), Some(0)); assert_eq!(b.checked_div_int(inner_min / accuracy), Some(1)); assert_eq!(b.checked_div_int(1i8), None); - assert_eq!(b.checked_div_int(0.saturating_sub(2)), Some(0.saturating_sub(inner_min / (2 * accuracy)))); - assert_eq!(b.checked_div_int(0.saturating_sub(inner_min / accuracy)), Some(0.saturating_sub(1))); + assert_eq!( + b.checked_div_int(0.saturating_sub(2)), + Some(0.saturating_sub(inner_min / (2 * accuracy))) + ); + assert_eq!( + b.checked_div_int(0.saturating_sub(inner_min / accuracy)), + Some(0.saturating_sub(1)) + ); assert_eq!(c.checked_div_int(i128::MIN), Some(0)); assert_eq!(d.checked_div_int(i32::MIN), Some(0)); } @@ -1294,7 +1350,10 @@ macro_rules! implement_fixed { if $name::SIGNED { assert_eq!($name::from_inner(inner_min).saturating_abs(), $name::max_value()); - assert_eq!($name::saturating_from_rational(-1, 2).saturating_abs(), (1, 2).into()); + assert_eq!( + $name::saturating_from_rational(-1, 2).saturating_abs(), + (1, 2).into() + ); } } @@ -1319,31 +1378,72 @@ macro_rules! 
implement_fixed { #[test] fn saturating_pow_should_work() { - assert_eq!($name::saturating_from_integer(2).saturating_pow(0), $name::saturating_from_integer(1)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(1), $name::saturating_from_integer(2)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(2), $name::saturating_from_integer(4)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(3), $name::saturating_from_integer(8)); - assert_eq!($name::saturating_from_integer(2).saturating_pow(50), - $name::saturating_from_integer(1125899906842624i64)); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(0), + $name::saturating_from_integer(1) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(1), + $name::saturating_from_integer(2) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(2), + $name::saturating_from_integer(4) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(3), + $name::saturating_from_integer(8) + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(50), + $name::saturating_from_integer(1125899906842624i64) + ); assert_eq!($name::saturating_from_integer(1).saturating_pow(1000), (1).into()); - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::MAX), (1).into()); + assert_eq!( + $name::saturating_from_integer(1).saturating_pow(usize::MAX), + (1).into() + ); if $name::SIGNED { // Saturating. - assert_eq!($name::saturating_from_integer(2).saturating_pow(68), $name::max_value()); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(68), + $name::max_value() + ); assert_eq!($name::saturating_from_integer(-1).saturating_pow(1000), (1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(1001), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::MAX), 0.saturating_sub(1).into()); - assert_eq!($name::saturating_from_integer(-1).saturating_pow(usize::MAX - 1), (1).into()); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(1001), + 0.saturating_sub(1).into() + ); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(usize::MAX), + 0.saturating_sub(1).into() + ); + assert_eq!( + $name::saturating_from_integer(-1).saturating_pow(usize::MAX - 1), + (1).into() + ); } - assert_eq!($name::saturating_from_integer(114209).saturating_pow(5), $name::max_value()); - - assert_eq!($name::saturating_from_integer(1).saturating_pow(usize::MAX), (1).into()); - assert_eq!($name::saturating_from_integer(0).saturating_pow(usize::MAX), (0).into()); - assert_eq!($name::saturating_from_integer(2).saturating_pow(usize::MAX), $name::max_value()); + assert_eq!( + $name::saturating_from_integer(114209).saturating_pow(5), + $name::max_value() + ); + + assert_eq!( + $name::saturating_from_integer(1).saturating_pow(usize::MAX), + (1).into() + ); + assert_eq!( + $name::saturating_from_integer(0).saturating_pow(usize::MAX), + (0).into() + ); + assert_eq!( + $name::saturating_from_integer(2).saturating_pow(usize::MAX), + $name::max_value() + ); } #[test] @@ -1368,9 +1468,18 @@ macro_rules! implement_fixed { if b < c { // Not executed by unsigned inners. 
- assert_eq!(a.checked_div(&0.saturating_sub(2).into()), Some($name::from_inner(0.saturating_sub(inner_max / 2)))); - assert_eq!(a.checked_div(&-$name::max_value()), Some(0.saturating_sub(1).into())); - assert_eq!(b.checked_div(&0.saturating_sub(2).into()), Some($name::from_inner(0.saturating_sub(inner_min / 2)))); + assert_eq!( + a.checked_div(&0.saturating_sub(2).into()), + Some($name::from_inner(0.saturating_sub(inner_max / 2))) + ); + assert_eq!( + a.checked_div(&-$name::max_value()), + Some(0.saturating_sub(1).into()) + ); + assert_eq!( + b.checked_div(&0.saturating_sub(2).into()), + Some($name::from_inner(0.saturating_sub(inner_min / 2))) + ); assert_eq!(c.checked_div(&$name::max_value()), Some(0.into())); assert_eq!(b.checked_div(&b), Some($name::one())); } @@ -1427,14 +1536,10 @@ macro_rules! implement_fixed { assert_eq!(n, i + f); - let n = $name::saturating_from_rational(5, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(5, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); - let n = $name::saturating_from_rational(1, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(1, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); if $name::SIGNED { @@ -1444,14 +1549,10 @@ macro_rules! implement_fixed { assert_eq!(n, i - f); // The sign is attached to the integer part unless it is zero. - let n = $name::saturating_from_rational(-5, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(-5, 2).frac().saturating_mul(10.into()); assert_eq!(n, 5.into()); - let n = $name::saturating_from_rational(-1, 2) - .frac() - .saturating_mul(10.into()); + let n = $name::saturating_from_rational(-1, 2).frac().saturating_mul(10.into()); assert_eq!(n, 0.saturating_sub(5).into()); } } @@ -1564,30 +1665,51 @@ macro_rules! implement_fixed { #[test] fn fmt_should_work() { let zero = $name::zero(); - assert_eq!(format!("{:?}", zero), format!("{}(0.{:0>weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", zero), + format!("{}(0.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let one = $name::one(); - assert_eq!(format!("{:?}", one), format!("{}(1.{:0>weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", one), + format!("{}(1.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let frac = $name::saturating_from_rational(1, 2); - assert_eq!(format!("{:?}", frac), format!("{}(0.{:0weight$})", stringify!($name), 0, weight=precision())); + assert_eq!( + format!("{:?}", neg), + format!("{}(-1.{:0>weight$})", stringify!($name), 0, weight = precision()) + ); let frac = $name::saturating_from_rational(-314, 100); - assert_eq!(format!("{:?}", frac), format!("{}(-3.{:0 u128 { @@ -63,7 +67,9 @@ pub fn to_big_uint(x: u128) -> biguint::BigUint { /// /// Invariant: c must be greater than or equal to 1. pub fn multiply_by_rational(mut a: u128, mut b: u128, mut c: u128) -> Result { - if a.is_zero() || b.is_zero() { return Ok(Zero::zero()); } + if a.is_zero() || b.is_zero() { + return Ok(Zero::zero()) + } c = c.max(1); // a and b are interchangeable by definition in this function. 
It always helps to assume the @@ -102,9 +108,10 @@ pub fn multiply_by_rational(mut a: u128, mut b: u128, mut c: u128) -> Result (c / 2) { q = q.add(&to_big_uint(1)); } + let r: u128 = r.try_into().expect("reminder of div by c is always less than c; qed"); + if r > (c / 2) { + q = q.add(&to_big_uint(1)); + } q }; q.lstrip(); diff --git a/substrate/primitives/arithmetic/src/lib.rs b/substrate/primitives/arithmetic/src/lib.rs index 110e5c0728037f0b1632c273e714142679df3561..cf2e8a1a60640234247bb224057707915b6b96de 100644 --- a/substrate/primitives/arithmetic/src/lib.rs +++ b/substrate/primitives/arithmetic/src/lib.rs @@ -34,18 +34,18 @@ macro_rules! assert_eq_error_rate { } pub mod biguint; +pub mod fixed_point; pub mod helpers_128bit; -pub mod traits; pub mod per_things; -pub mod fixed_point; pub mod rational; +pub mod traits; -pub use fixed_point::{FixedPointNumber, FixedPointOperand, FixedI64, FixedI128, FixedU128}; -pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbill, Perquintill}; +pub use fixed_point::{FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, FixedU128}; +pub use per_things::{InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, UpperOf}; pub use rational::{Rational128, RationalInfinite}; -use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; -use traits::{BaseArithmetic, One, Zero, SaturatedConversion, Unsigned}; +use sp_std::{cmp::Ordering, convert::TryInto, fmt::Debug, prelude::*}; +use traits::{BaseArithmetic, One, SaturatedConversion, Unsigned, Zero}; /// Trait for comparing two numbers with an threshold. /// @@ -82,7 +82,6 @@ where _ => Ordering::Equal, } } - } } @@ -114,8 +113,10 @@ impl_normalize_for_numeric!(u8, u16, u32, u64, u128); impl Normalizable

<P> for Vec<P>
{ fn normalize(&self, targeted_sum: P) -> Result, &'static str> { - let uppers = - self.iter().map(|p| >::from(p.clone().deconstruct())).collect::>(); + let uppers = self + .iter() + .map(|p| >::from(p.clone().deconstruct())) + .collect::>(); let normalized = normalize(uppers.as_ref(), >::from(targeted_sum.deconstruct()))?; @@ -157,7 +158,8 @@ impl Normalizable

<P> for Vec<P>
{ /// /// * This proof is used in the implementation as well. pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str> - where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, +where + T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, { // compute sum and return error if failed. let mut sum = T::zero(); @@ -171,12 +173,12 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // Nothing to do here. if count.is_zero() { - return Ok(Vec::::new()); + return Ok(Vec::::new()) } let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { - return Ok(input.to_vec()); + return Ok(input.to_vec()) } let needs_bump = targeted_sum > sum; @@ -198,7 +200,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str if !per_round.is_zero() { for _ in 0..count { - output_with_idx[min_index].1 = output_with_idx[min_index].1 + output_with_idx[min_index].1 = output_with_idx[min_index] + .1 .checked_add(&per_round) .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { @@ -210,7 +213,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // continue with the previous min_index while !leftover.is_zero() { - output_with_idx[min_index].1 = output_with_idx[min_index].1 + output_with_idx[min_index].1 = output_with_idx[min_index] + .1 .checked_add(&T::one()) .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { @@ -232,9 +236,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str if !per_round.is_zero() { for _ in 0..count { - output_with_idx[max_index].1 = output_with_idx[max_index].1 - .checked_sub(&per_round) - .unwrap_or_else(|| { + output_with_idx[max_index].1 = + output_with_idx[max_index].1.checked_sub(&per_round).unwrap_or_else(|| { let remainder = per_round - output_with_idx[max_index].1; leftover += remainder; output_with_idx[max_index].1.saturating_sub(per_round) @@ -284,7 +287,7 @@ mod normalize_tests { normalize(vec![8 as $type, 9, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10], ); - } + }; } // it should work for all types as long as the length of vector can be converted to T. 
test_for!(u128); @@ -297,22 +300,13 @@ mod normalize_tests { #[test] fn fails_on_if_input_sum_large() { assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok()); - assert_eq!( - normalize(vec![1u8; 256].as_ref(), 10), - Err("sum of input cannot fit in `T`"), - ); + assert_eq!(normalize(vec![1u8; 256].as_ref(), 10), Err("sum of input cannot fit in `T`"),); } #[test] fn does_not_fail_on_subtraction_overflow() { - assert_eq!( - normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), - vec![1, 9, 0], - ); - assert_eq!( - normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), - vec![0, 1, 0], - ); + assert_eq!(normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), vec![1, 9, 0],); + assert_eq!(normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), vec![0, 1, 0],); } #[test] @@ -323,11 +317,9 @@ mod normalize_tests { #[test] fn works_for_per_thing() { assert_eq!( - vec![ - Perbill::from_percent(33), - Perbill::from_percent(33), - Perbill::from_percent(33) - ].normalize(Perbill::one()).unwrap(), + vec![Perbill::from_percent(33), Perbill::from_percent(33), Perbill::from_percent(33)] + .normalize(Perbill::one()) + .unwrap(), vec![ Perbill::from_parts(333333334), Perbill::from_parts(333333333), @@ -336,11 +328,9 @@ mod normalize_tests { ); assert_eq!( - vec![ - Perbill::from_percent(20), - Perbill::from_percent(15), - Perbill::from_percent(30) - ].normalize(Perbill::one()).unwrap(), + vec![Perbill::from_percent(20), Perbill::from_percent(15), Perbill::from_percent(30)] + .normalize(Perbill::one()) + .unwrap(), vec![ Perbill::from_parts(316666668), Perbill::from_parts(383333332), @@ -355,11 +345,9 @@ mod normalize_tests { // could have a situation where the sum cannot be calculated in the inner type. Calculating // using the upper type of the per_thing should assure this to be okay. 
assert_eq!( - vec![ - PerU16::from_percent(40), - PerU16::from_percent(40), - PerU16::from_percent(40), - ].normalize(PerU16::one()).unwrap(), + vec![PerU16::from_percent(40), PerU16::from_percent(40), PerU16::from_percent(40),] + .normalize(PerU16::one()) + .unwrap(), vec![ PerU16::from_parts(21845), // 33% PerU16::from_parts(21845), // 33% @@ -370,82 +358,40 @@ mod normalize_tests { #[test] fn normalize_works_all_le() { - assert_eq!( - normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); - assert_eq!( - normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); - assert_eq!( - normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), - vec![11, 11, 8, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), vec![11, 11, 8, 10],); - assert_eq!( - normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), - vec![11, 8, 11, 10], - ); + assert_eq!(normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), vec![11, 8, 11, 10],); - assert_eq!( - normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), - vec![11, 11, 8, 10], - ); + assert_eq!(normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), vec![11, 11, 8, 10],); } #[test] fn normalize_works_some_ge() { - assert_eq!( - normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), - vec![10, 11, 9, 10], - ); + assert_eq!(normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), vec![10, 11, 9, 10],); } #[test] fn always_inc_min() { - assert_eq!( - normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); - assert_eq!( - normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); - assert_eq!( - normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); + assert_eq!(normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); + assert_eq!(normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); } #[test] fn normalize_works_all_ge() { - assert_eq!( - normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); - assert_eq!( - normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), - vec![10, 10, 10, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), vec![10, 10, 10, 10],); - assert_eq!( - normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), - vec![12, 9, 9, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), vec![12, 9, 9, 10],); - assert_eq!( - normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), - vec![9, 12, 9, 10], - ); + assert_eq!(normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), vec![9, 12, 9, 10],); - assert_eq!( - normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), - vec![9, 9, 12, 10], - ); + assert_eq!(normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), vec![9, 9, 12, 10],); } } diff --git a/substrate/primitives/arithmetic/src/per_things.rs b/substrate/primitives/arithmetic/src/per_things.rs index 80d556486d563a60a711f315357e0f20f5206ea9..b114c4a96788d54b11ee9bec5c4d25c8167be21d 100644 --- a/substrate/primitives/arithmetic/src/per_things.rs +++ 
b/substrate/primitives/arithmetic/src/per_things.rs @@ -16,16 +16,20 @@ // limitations under the License. #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use sp_std::{ops, fmt, prelude::*, convert::{TryFrom, TryInto}}; -use codec::{Encode, CompactAs}; -use num_traits::Pow; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, - One, + BaseArithmetic, Bounded, One, SaturatedConversion, Saturating, UniqueSaturatedInto, Unsigned, + Zero, }; +use codec::{CompactAs, Encode}; +use num_traits::Pow; use sp_debug_derive::RuntimeDebug; +use sp_std::{ + convert::{TryFrom, TryInto}, + fmt, ops, + prelude::*, +}; /// Get the inner type of a `PerThing`. pub type InnerOf

<P> = <P as PerThing>
::Inner; @@ -36,8 +40,19 @@ pub type UpperOf

<P> = <P as PerThing>
::Upper; /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. pub trait PerThing: - Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug - + ops::Div + ops::Mul + Pow + Sized + + Saturating + + Copy + + Default + + Eq + + PartialEq + + Ord + + PartialOrd + + Bounded + + fmt::Debug + + ops::Div + + ops::Mul + + Pow { /// The data type used to build this per-thingy. type Inner: BaseArithmetic + Unsigned + Copy + Into + fmt::Debug; @@ -56,16 +71,24 @@ pub trait PerThing: const ACCURACY: Self::Inner; /// Equivalent to `Self::from_parts(0)`. - fn zero() -> Self { Self::from_parts(Self::Inner::zero()) } + fn zero() -> Self { + Self::from_parts(Self::Inner::zero()) + } /// Return `true` if this is nothing. - fn is_zero(&self) -> bool { self.deconstruct() == Self::Inner::zero() } + fn is_zero(&self) -> bool { + self.deconstruct() == Self::Inner::zero() + } /// Equivalent to `Self::from_parts(Self::ACCURACY)`. - fn one() -> Self { Self::from_parts(Self::ACCURACY) } + fn one() -> Self { + Self::from_parts(Self::ACCURACY) + } /// Return `true` if this is one. - fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } + fn is_one(&self) -> bool { + self.deconstruct() == Self::ACCURACY + } /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` /// but more accurate and can cope with potential type overflows. @@ -104,8 +127,13 @@ pub trait PerThing: /// ``` fn mul_floor(self, b: N) -> N where - N: Clone + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Unsigned, Self::Inner: Into, { overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) @@ -128,9 +156,14 @@ pub trait PerThing: /// ``` fn mul_ceil(self, b: N) -> N where - N: Clone + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Unsigned, - Self::Inner: Into + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Unsigned, + Self::Inner: Into, { overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) } @@ -146,9 +179,14 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul(self, b: N) -> N where - N: Clone + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) @@ -168,9 +206,14 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul_floor(self, b: N) -> N where - N: Clone + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) @@ -190,9 +233,14 @@ pub trait PerThing: /// ``` fn saturating_reciprocal_mul_ceil(self, b: N) -> N where - N: Clone + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating + - Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating + + Unsigned, Self::Inner: Into, { saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) @@ -211,7 +259,9 @@ pub trait PerThing: /// Same as `Self::from_float`. 
#[deprecated = "Use from_float instead"] #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self { Self::from_float(x) } + fn from_fraction(x: f64) -> Self { + Self::from_float(x) + } /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. /// @@ -233,18 +283,31 @@ pub trait PerThing: /// ``` fn from_rational(p: N, q: N) -> Self where - N: Clone + Ord + TryInto + TryInto + - ops::Div + ops::Rem + ops::Add + Unsigned, + N: Clone + + Ord + + TryInto + + TryInto + + ops::Div + + ops::Rem + + ops::Add + + Unsigned, Self::Inner: Into; /// Same as `Self::from_rational`. #[deprecated = "Use from_rational instead"] fn from_rational_approximation(p: N, q: N) -> Self - where - N: Clone + Ord + TryInto + TryInto - + ops::Div + ops::Rem + ops::Add + Unsigned - + Zero + One, - Self::Inner: Into, + where + N: Clone + + Ord + + TryInto + + TryInto + + ops::Div + + ops::Rem + + ops::Add + + Unsigned + + Zero + + One, + Self::Inner: Into, { Self::from_rational(p, q) } @@ -264,37 +327,38 @@ enum Rounding { /// bounds instead of overflowing. fn saturating_reciprocal_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating + Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Saturating + + Unsigned, P: PerThing, P::Inner: Into, { let maximum: N = P::ACCURACY.into(); - let c = rational_mul_correction::( - x.clone(), - P::ACCURACY, - part, - rounding, - ); + let c = rational_mul_correction::(x.clone(), P::ACCURACY, part, rounding); (x / part.into()).saturating_mul(maximum).saturating_add(c) } /// Overflow-prune multiplication. Accurately multiply a value by `self` without overflowing. fn overflow_prune_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, + N: Clone + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Unsigned, P: PerThing, P::Inner: Into, { let maximum: N = P::ACCURACY.into(); let part_n: N = part.into(); - let c = rational_mul_correction::( - x.clone(), - part, - P::ACCURACY, - rounding, - ); + let c = rational_mul_correction::(x.clone(), part, P::ACCURACY, rounding); (x / maximum) * part_n + c } @@ -304,10 +368,14 @@ where /// to `x / denom * numer` for an accurate result. fn rational_mul_correction(x: N, numer: P::Inner, denom: P::Inner, rounding: Rounding) -> N where - N: UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Unsigned, + N: UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Unsigned, P: PerThing, - P::Inner: Into + P::Inner: Into, { let numer_upper = P::Upper::from(numer); let denom_n: N = denom.into(); @@ -324,16 +392,18 @@ where // Already rounded down Rounding::Down => {}, // Round up if the fractional part of the result is non-zero. - Rounding::Up => if rem_mul_upper % denom_upper > 0.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. - rem_mul_div_inner += 1.into(); - }, + Rounding::Up => + if rem_mul_upper % denom_upper > 0.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner += 1.into(); + }, // Round up if the fractional part of the result is greater than a half. An exact half is // rounded down. - Rounding::Nearest => if rem_mul_upper % denom_upper > denom_upper / 2.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. 
- rem_mul_div_inner += 1.into(); - }, + Rounding::Nearest => + if rem_mul_upper % denom_upper > denom_upper / 2.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner += 1.into(); + }, } rem_mul_div_inner.into() } @@ -1331,15 +1401,7 @@ macro_rules! implement_per_thing_with_perthousand { } } -implement_per_thing!( - Percent, - test_per_cent, - [u32, u64, u128], - 100u8, - u8, - u16, - "_Percent_", -); +implement_per_thing!(Percent, test_per_cent, [u32, u64, u128], 100u8, u8, u16, "_Percent_",); implement_per_thing_with_perthousand!( PerU16, test_peru16, diff --git a/substrate/primitives/arithmetic/src/rational.rs b/substrate/primitives/arithmetic/src/rational.rs index feb81eb57206852adfdd531952c13279bd72371b..a15f5ac8c165036f9c3ae6ea7151aebde1c96480 100644 --- a/substrate/primitives/arithmetic/src/rational.rs +++ b/substrate/primitives/arithmetic/src/rational.rs @@ -15,10 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::{biguint::BigUint, helpers_128bit}; +use num_traits::{Bounded, One, Zero}; use sp_std::{cmp::Ordering, prelude::*}; -use crate::helpers_128bit; -use num_traits::{Zero, One, Bounded}; -use crate::biguint::BigUint; /// A wrapper for any rational number with infinitely large numerator and denominator. /// @@ -160,9 +159,11 @@ impl Rational128 { /// accurately calculated. pub fn lcm(&self, other: &Self) -> Result { // this should be tested better: two large numbers that are almost the same. - if self.1 == other.1 { return Ok(self.1) } + if self.1 == other.1 { + return Ok(self.1) + } let g = helpers_128bit::gcd(self.1, other.1); - helpers_128bit::multiply_by_rational(self.1 , other.1, g) + helpers_128bit::multiply_by_rational(self.1, other.1, g) } /// A saturating add that assumes `self` and `other` have the same denominator. @@ -170,7 +171,7 @@ impl Rational128 { if other.is_zero() { self } else { - Self(self.0.saturating_add(other.0) ,self.1) + Self(self.0.saturating_add(other.0), self.1) } } @@ -179,7 +180,7 @@ impl Rational128 { if other.is_zero() { self } else { - Self(self.0.saturating_sub(other.0) ,self.1) + Self(self.0.saturating_sub(other.0), self.1) } } @@ -190,7 +191,9 @@ impl Rational128 { let lcm = self.lcm(&other).map_err(|_| "failed to scale to denominator")?; let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let n = self_scaled.0.checked_add(other_scaled.0) + let n = self_scaled + .0 + .checked_add(other_scaled.0) .ok_or("overflow while adding numerators")?; Ok(Self(n, self_scaled.1)) } @@ -203,7 +206,9 @@ impl Rational128 { let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let n = self_scaled.0.checked_sub(other_scaled.0) + let n = self_scaled + .0 + .checked_sub(other_scaled.0) .ok_or("overflow while subtracting numerators")?; Ok(Self(n, self_scaled.1)) } @@ -243,7 +248,8 @@ impl Ord for Rational128 { } else { // Don't even compute gcd. 
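The widened `where` clauses above all feed the same machinery: `mul_floor` and `mul_ceil` differ only in which `Rounding` variant they hand to the correction helper. A minimal usage sketch, assuming `sp-arithmetic` as a dependency and `Perbill` as the concrete `PerThing`:

```rust
use sp_arithmetic::{PerThing, Perbill};

fn main() {
    // One third, expressed in parts per billion via `from_rational`.
    let third = Perbill::from_rational(1u64, 3u64);

    // 10 * 1/3 = 3.33..: `mul_floor` rounds the remainder down and
    // `mul_ceil` rounds it up, matching `Rounding::Down`/`Rounding::Up`.
    assert_eq!(third.mul_floor(10u64), 3);
    assert_eq!(third.mul_ceil(10u64), 4);
}
```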
let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); self_n.cmp(&other_n) } } @@ -256,7 +262,8 @@ impl PartialEq for Rational128 { self.0.eq(&other.0) } else { let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); self_n.eq(&other_n) } } @@ -264,8 +271,7 @@ impl PartialEq for Rational128 { #[cfg(test)] mod tests { - use super::*; - use super::helpers_128bit::*; + use super::{helpers_128bit::*, *}; const MAX128: u128 = u128::MAX; const MAX64: u128 = u64::MAX as u128; @@ -277,7 +283,9 @@ mod tests { fn mul_div(a: u128, b: u128, c: u128) -> u128 { use primitive_types::U256; - if a.is_zero() { return Zero::zero(); } + if a.is_zero() { + return Zero::zero() + } let c = c.max(1); // e for extended @@ -295,14 +303,8 @@ mod tests { #[test] fn truth_value_function_works() { - assert_eq!( - mul_div(2u128.pow(100), 8, 4), - 2u128.pow(101) - ); - assert_eq!( - mul_div(2u128.pow(100), 4, 8), - 2u128.pow(99) - ); + assert_eq!(mul_div(2u128.pow(100), 8, 4), 2u128.pow(101)); + assert_eq!(mul_div(2u128.pow(100), 4, 8), 2u128.pow(99)); // and it returns a if result cannot fit assert_eq!(mul_div(MAX128 - 10, 2, 1), MAX128 - 10); @@ -319,13 +321,10 @@ mod tests { assert_eq!(r(MAX128 / 2, MAX128).to_den(10), Ok(r(5, 10))); // large to perbill. This is very well needed for npos-elections. - assert_eq!( - r(MAX128 / 2, MAX128).to_den(1000_000_000), - Ok(r(500_000_000, 1000_000_000)) - ); + assert_eq!(r(MAX128 / 2, MAX128).to_den(1000_000_000), Ok(r(500_000_000, 1000_000_000))); // large to large - assert_eq!(r(MAX128 / 2, MAX128).to_den(MAX128/2), Ok(r(MAX128/4, MAX128/2))); + assert_eq!(r(MAX128 / 2, MAX128).to_den(MAX128 / 2), Ok(r(MAX128 / 4, MAX128 / 2))); } #[test] @@ -343,11 +342,11 @@ mod tests { // large numbers assert_eq!( - r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128-1)), + r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128 - 1)), Err("result cannot fit in u128"), ); assert_eq!( - r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64-1)), + r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64 - 1)), Ok(340282366920938463408034375210639556610), ); assert!(340282366920938463408034375210639556610 < MAX128); @@ -362,7 +361,7 @@ mod tests { // errors assert_eq!( - r(1, MAX128).checked_add(r(1, MAX128-1)), + r(1, MAX128).checked_add(r(1, MAX128 - 1)), Err("failed to scale to denominator"), ); assert_eq!( @@ -383,17 +382,14 @@ mod tests { // errors assert_eq!( - r(2, MAX128).checked_sub(r(1, MAX128-1)), + r(2, MAX128).checked_sub(r(1, MAX128 - 1)), Err("failed to scale to denominator"), ); assert_eq!( r(7, MAX128).checked_sub(r(MAX128, MAX128)), Err("overflow while subtracting numerators"), ); - assert_eq!( - r(1, 10).checked_sub(r(2,10)), - Err("overflow while subtracting numerators"), - ); + assert_eq!(r(1, 10).checked_sub(r(2, 10)), Err("overflow while subtracting numerators"),); } #[test] @@ -428,7 +424,7 @@ mod tests { ); assert_eq!( // MAX128 % 7 == 3 - multiply_by_rational(MAX128, 11 , 13).unwrap(), + multiply_by_rational(MAX128, 11, 13).unwrap(), (MAX128 / 13 * 11) + (8 * 11 / 13), ); assert_eq!( @@ -437,14 +433,8 @@ mod tests { (MAX128 / 1000 * 555) + (455 
* 555 / 1000), ); - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), - 2 * MAX64 - 1, - ); - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), - 2 * MAX64 - 3, - ); + assert_eq!(multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), 2 * MAX64 - 1,); + assert_eq!(multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), 2 * MAX64 - 3,); assert_eq!( multiply_by_rational(MAX64 + 100, MAX64_2, MAX64_2 / 2).unwrap(), @@ -459,31 +449,23 @@ mod tests { multiply_by_rational(2u128.pow(66) - 1, 2u128.pow(65) - 1, 2u128.pow(65)).unwrap(), 73786976294838206461, ); - assert_eq!( - multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), - 250000000, - ); + assert_eq!(multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), 250000000,); assert_eq!( multiply_by_rational( 29459999999999999988000u128, 1000000000000000000u128, 10000000000000000000u128 - ).unwrap(), + ) + .unwrap(), 2945999999999999998800u128 ); } #[test] fn multiply_by_rational_a_b_are_interchangeable() { - assert_eq!( - multiply_by_rational(10, MAX128, MAX128 / 2), - Ok(20), - ); - assert_eq!( - multiply_by_rational(MAX128, 10, MAX128 / 2), - Ok(20), - ); + assert_eq!(multiply_by_rational(10, MAX128, MAX128 / 2), Ok(20),); + assert_eq!(multiply_by_rational(MAX128, 10, MAX128 / 2), Ok(20),); } #[test] diff --git a/substrate/primitives/arithmetic/src/traits.rs b/substrate/primitives/arithmetic/src/traits.rs index d0ce921d9d342d90d19c05ad1f306e8b95c82461..a441a0dcbc08d4dff0c741255839871fc037bc0d 100644 --- a/substrate/primitives/arithmetic/src/traits.rs +++ b/substrate/primitives/arithmetic/src/traits.rs @@ -17,58 +17,129 @@ //! Primitive traits for the runtime arithmetic. -use sp_std::{self, convert::{TryFrom, TryInto}}; use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedNeg, - CheckedShl, CheckedShr, checked_pow, Signed, Unsigned, + checked_pow, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedNeg, CheckedShl, CheckedShr, + CheckedSub, One, Signed, Unsigned, Zero, }; -use sp_std::ops::{ - Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, - RemAssign, Shl, Shr +use sp_std::{ + self, + convert::{TryFrom, TryInto}, + ops::{ + Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shl, Shr, Sub, SubAssign, + }, }; /// A meta trait for arithmetic type operations, regardless of any limitation on size. 
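The reflowed assertions above all target `helpers_128bit::multiply_by_rational`, which evaluates `a * b / c` over `u128` without overflowing the intermediate product. A compact sketch of the identity the `MAX128` test relies on, assuming `sp-arithmetic` as a dependency:

```rust
use sp_arithmetic::helpers_128bit::multiply_by_rational;

fn main() {
    let max = u128::MAX;

    // A naive `max * 11 / 13` overflows the intermediate product; the
    // helper splits the work so the true value is still recovered.
    // `max % 13 == 8`, so the expected value mirrors the test above.
    let res = multiply_by_rational(max, 11, 13).unwrap();
    assert_eq!(res, (max / 13 * 11) + (8 * 11 / 13));
}
```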
pub trait BaseArithmetic: - From + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -{} - -impl + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -> BaseArithmetic for T {} + From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto +{ +} + +impl< + T: From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto, + > BaseArithmetic for T +{ +} /// A meta trait for arithmetic. /// @@ -129,35 +200,49 @@ pub trait Saturating { fn saturating_pow(self, exp: usize) -> Self; /// Increment self by one, saturating. - fn saturating_inc(&mut self) where Self: One { + fn saturating_inc(&mut self) + where + Self: One, + { let mut o = Self::one(); sp_std::mem::swap(&mut o, self); *self = o.saturating_add(One::one()); } /// Decrement self by one, saturating at zero. - fn saturating_dec(&mut self) where Self: One { + fn saturating_dec(&mut self) + where + Self: One, + { let mut o = Self::one(); sp_std::mem::swap(&mut o, self); *self = o.saturating_sub(One::one()); } /// Increment self by some `amount`, saturating. 
- fn saturating_accrue(&mut self, amount: Self) where Self: One { + fn saturating_accrue(&mut self, amount: Self) + where + Self: One, + { let mut o = Self::one(); sp_std::mem::swap(&mut o, self); *self = o.saturating_add(amount); } /// Decrement self by some `amount`, saturating at zero. - fn saturating_reduce(&mut self, amount: Self) where Self: One { + fn saturating_reduce(&mut self, amount: Self) + where + Self: One, + { let mut o = Self::one(); sp_std::mem::swap(&mut o, self); *self = o.saturating_sub(amount); } } -impl Saturating for T { +impl Saturating + for T +{ fn saturating_add(self, o: Self) -> Self { ::saturating_add(self, o) } @@ -167,26 +252,24 @@ impl Self { - self.checked_mul(&o) - .unwrap_or_else(|| - if (self < T::zero()) != (o < T::zero()) { - Bounded::min_value() - } else { - Bounded::max_value() - } - ) + self.checked_mul(&o).unwrap_or_else(|| { + if (self < T::zero()) != (o < T::zero()) { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) } fn saturating_pow(self, exp: usize) -> Self { let neg = self < T::zero() && exp % 2 != 0; - checked_pow(self, exp) - .unwrap_or_else(|| - if neg { - Bounded::min_value() - } else { - Bounded::max_value() - } - ) + checked_pow(self, exp).unwrap_or_else(|| { + if neg { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) } } @@ -199,7 +282,10 @@ pub trait SaturatedConversion { /// This just uses `UniqueSaturatedFrom` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn saturated_from(t: T) -> Self where Self: UniqueSaturatedFrom { + fn saturated_from(t: T) -> Self + where + Self: UniqueSaturatedFrom, + { >::unique_saturated_from(t) } @@ -208,7 +294,10 @@ pub trait SaturatedConversion { /// This just uses `UniqueSaturatedInto` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. 
- fn saturated_into(self) -> T where Self: UniqueSaturatedInto { + fn saturated_into(self) -> T + where + Self: UniqueSaturatedInto, + { >::unique_saturated_into(self) } } diff --git a/substrate/primitives/authority-discovery/src/lib.rs b/substrate/primitives/authority-discovery/src/lib.rs index b04ce43a2c74770f074d73e824793829d7f571e4..871a35e6bf487e3b1f2170d1433b0f12b5e08fda 100644 --- a/substrate/primitives/authority-discovery/src/lib.rs +++ b/substrate/primitives/authority-discovery/src/lib.rs @@ -22,11 +22,7 @@ use sp_std::vec::Vec; mod app { - use sp_application_crypto::{ - key_types::AUTHORITY_DISCOVERY, - app_crypto, - sr25519, - }; + use sp_application_crypto::{app_crypto, key_types::AUTHORITY_DISCOVERY, sr25519}; app_crypto!(sr25519, AUTHORITY_DISCOVERY); } diff --git a/substrate/primitives/authorship/src/lib.rs b/substrate/primitives/authorship/src/lib.rs index 1350fa17ff301dee0d025083e8c5523bbc6a702b..254078b8445aec3225c288f531061a3e458acd03 100644 --- a/substrate/primitives/authorship/src/lib.rs +++ b/substrate/primitives/authorship/src/lib.rs @@ -19,11 +19,11 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result::Result, prelude::*}; +use sp_std::{prelude::*, result::Result}; -use codec::{Encode, Decode}; -use sp_inherents::{Error, InherentIdentifier, InherentData, IsFatalError}; -use sp_runtime::{RuntimeString, traits::Header as HeaderT}; +use codec::{Decode, Encode}; +use sp_inherents::{Error, InherentData, InherentIdentifier, IsFatalError}; +use sp_runtime::{traits::Header as HeaderT, RuntimeString}; /// The identifier for the `uncles` inherent. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"uncles00"; diff --git a/substrate/primitives/blockchain/src/backend.rs b/substrate/primitives/blockchain/src/backend.rs index dbce364ce7987c15ec485a5c9e68daed9e55440d..642e7c5b9528f2ef7a252b76864b9d68b7ba7c92 100644 --- a/substrate/primitives/blockchain/src/backend.rs +++ b/substrate/primitives/blockchain/src/backend.rs @@ -19,11 +19,13 @@ use std::sync::Arc; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::generic::BlockId; -use sp_runtime::Justifications; use log::warn; use parking_lot::RwLock; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justifications, +}; use crate::header_metadata::HeaderMetadata; @@ -38,7 +40,10 @@ pub trait HeaderBackend: Send + Sync { /// Get block status. fn status(&self, id: BlockId) -> Result; /// Get block number by hash. Returns `None` if the header is not in the chain. - fn number(&self, hash: Block::Hash) -> Result::Header as HeaderT>::Number>>; + fn number( + &self, + hash: Block::Hash, + ) -> Result::Header as HeaderT>::Number>>; /// Get block hash by number. Returns `None` if the header is not in the chain. fn hash(&self, number: NumberFor) -> Result>; @@ -60,28 +65,29 @@ pub trait HeaderBackend: Send + Sync { /// Get block header. Returns `UnknownBlock` error if block is not found. fn expect_header(&self, id: BlockId) -> Result { - self.header(id)?.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) + self.header(id)? + .ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) } /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. 
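`Saturating` and `SaturatedConversion`, whose bounds are re-wrapped here, clamp at the numeric bounds instead of wrapping or panicking. A short sketch of both, assuming `sp-arithmetic` as a dependency:

```rust
use sp_arithmetic::traits::{SaturatedConversion, Saturating};

fn main() {
    // `saturating_inc` bumps in place and clamps at the type's maximum.
    let mut x = u8::MAX;
    x.saturating_inc();
    assert_eq!(x, u8::MAX);

    // `saturated_into` converts between integer types with the turbofish
    // mentioned in the docs above, clamping when the value does not fit.
    let big: u64 = 1 << 20;
    assert_eq!(big.saturated_into::<u8>(), u8::MAX);
}
```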
fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { - self.block_number_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block number from id: {}", id)) - )) + self.block_number_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id))) + }) } /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block hash from id: {}", id)) - )) + self.block_hash_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) + }) } } /// Blockchain database backend. Does not perform any validation. -pub trait Backend: HeaderBackend + HeaderMetadata { +pub trait Backend: + HeaderBackend + HeaderMetadata +{ /// Get block body. Returns `None` if block is not found. fn body(&self, id: BlockId) -> Result::Extrinsic>>>; /// Get block justifications. Returns `None` if no justification exists. @@ -120,14 +126,14 @@ pub trait Backend: HeaderBackend + HeaderMetadata x, // target not in blockchain - None => { return Ok(None); }, + None => return Ok(None), } }; if let Some(max_number) = maybe_max_number { // target outside search range if target_header.number() > &max_number { - return Ok(None); + return Ok(None) } } @@ -148,12 +154,12 @@ pub trait Backend: HeaderBackend + HeaderMetadata= *target_header.number() { // header is on a dead fork. - return Ok(None); + return Ok(None) } self.leaves()? @@ -171,12 +177,13 @@ pub trait Backend: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata: HeaderBackend + HeaderMetadata Result>>; + fn indexed_transaction(&self, hash: &Block::Hash) -> Result>>; /// Check if indexed transaction exists. fn has_indexed_transaction(&self, hash: &Block::Hash) -> Result { @@ -253,7 +257,9 @@ pub trait Cache: Send + Sync { &self, key: &well_known_cache_keys::Id, block: &BlockId, - ) -> Result, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>>; + ) -> Result< + Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, + >; } /// Blockchain info @@ -272,7 +278,7 @@ pub struct Info { /// Last finalized state. pub finalized_state: Option<(Block::Hash, <::Header as HeaderT>::Number)>, /// Number of concurrent leave forks. - pub number_leaves: usize + pub number_leaves: usize, } /// Block status. diff --git a/substrate/primitives/blockchain/src/error.rs b/substrate/primitives/blockchain/src/error.rs index 0d6ac10a8800e9280a5e5e0b0c924c4fae200919..bc27c36401e890bf93453aeb7f2a7a08377a7609 100644 --- a/substrate/primitives/blockchain/src/error.rs +++ b/substrate/primitives/blockchain/src/error.rs @@ -17,12 +17,12 @@ //! Substrate client possible errors. -use std::{self, result}; -use sp_state_machine; -use sp_runtime::transaction_validity::TransactionValidityError; -use sp_consensus; use codec::Error as CodecError; use sp_api::ApiError; +use sp_consensus; +use sp_runtime::transaction_validity::TransactionValidityError; +use sp_state_machine; +use std::{self, result}; /// Client Result type alias pub type Result = result::Result; @@ -205,7 +205,10 @@ impl Error { /// Construct from a state db error. // Can not be done directly, since that would make cargo run out of stack if // `sc-state-db` is lib is added as dependency. 
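The `from_state_db` helper being re-wrapped in this hunk accepts any `Debug` error precisely so that `sc-state-db` never becomes a direct dependency. A sketch with a hypothetical stand-in error type (`DummyDbError` is invented for illustration):

```rust
use sp_blockchain::Error;

// Hypothetical stand-in for an `sc-state-db` error type; any `Debug`
// type works, which is what keeps the crates decoupled.
#[derive(Debug)]
struct DummyDbError(&'static str);

fn main() {
    let err = Error::from_state_db(DummyDbError("pruning window exceeded"));
    println!("{}", err);
}
```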
- pub fn from_state_db(e: E) -> Self where E: std::fmt::Debug { + pub fn from_state_db(e: E) -> Self + where + E: std::fmt::Debug, + { Error::StateDatabase(format!("{:?}", e)) } } diff --git a/substrate/primitives/blockchain/src/header_metadata.rs b/substrate/primitives/blockchain/src/header_metadata.rs index 87d0057f32c243d9db27e95cb2f61cecb900ab77..928409963bcd4a74952f1446ae70476624c592f9 100644 --- a/substrate/primitives/blockchain/src/header_metadata.rs +++ b/substrate/primitives/blockchain/src/header_metadata.rs @@ -18,9 +18,9 @@ //! Implements tree backend, cached header metadata and algorithms //! to compute routes efficiently over the tree of headers. -use sp_runtime::traits::{Block as BlockT, NumberFor, Header}; -use parking_lot::RwLock; use lru::LruCache; +use parking_lot::RwLock; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. const LRU_CACHE_SIZE: usize = 5_000; @@ -86,10 +86,7 @@ pub fn lowest_common_ancestor + ?Sized>( backend.insert_header_metadata(orig_header_two.hash, orig_header_two); } - Ok(HashAndNumber { - hash: header_one.hash, - number: header_one.number, - }) + Ok(HashAndNumber { hash: header_one.hash, number: header_one.number }) } /// Compute a tree-route between two blocks. See tree-route docs for more details. @@ -105,51 +102,33 @@ pub fn tree_route>( let mut to_branch = Vec::new(); while to.number > from.number { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; } while from.number > to.number { - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); + from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; } // numbers are equal now. walk backwards until the block is the same while to.hash != from.hash { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + to_branch.push(HashAndNumber { number: to.number, hash: to.hash }); to = backend.header_metadata(to.parent)?; - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); + from_branch.push(HashAndNumber { number: from.number, hash: from.hash }); from = backend.header_metadata(from.parent)?; } // add the pivot block. and append the reversed to-branch // (note that it's reverse order originals) let pivot = from_branch.len(); - from_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); + from_branch.push(HashAndNumber { number: to.number, hash: to.hash }); from_branch.extend(to_branch.into_iter().rev()); - Ok(TreeRoute { - route: from_branch, - pivot, - }) + Ok(TreeRoute { route: from_branch, pivot }) } /// Hash and number of a block. @@ -204,14 +183,16 @@ impl TreeRoute { /// Get the common ancestor block. This might be one of the two blocks of the /// route. pub fn common_block(&self) -> &HashAndNumber { - self.route.get(self.pivot).expect("tree-routes are computed between blocks; \ + self.route.get(self.pivot).expect( + "tree-routes are computed between blocks; \ which are included in the route; \ - thus it is never empty; qed") + thus it is never empty; qed", + ) } /// Get a slice of enacted blocks (descendents of the common ancestor) pub fn enacted(&self) -> &[HashAndNumber] { - &self.route[self.pivot + 1 ..] + &self.route[self.pivot + 1..] 
} } @@ -240,17 +221,13 @@ pub struct HeaderMetadataCache { impl HeaderMetadataCache { /// Creates a new LRU header metadata cache with `capacity`. pub fn new(capacity: usize) -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(capacity)), - } + HeaderMetadataCache { cache: RwLock::new(LruCache::new(capacity)) } } } impl Default for HeaderMetadataCache { fn default() -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)), - } + HeaderMetadataCache { cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)) } } } diff --git a/substrate/primitives/blockchain/src/lib.rs b/substrate/primitives/blockchain/src/lib.rs index 696050f57ac89994ac2e19105d5941f6e1167c8c..cd36cabe15517e934012028e22adaed03c90b184 100644 --- a/substrate/primitives/blockchain/src/lib.rs +++ b/substrate/primitives/blockchain/src/lib.rs @@ -18,9 +18,9 @@ //! Substrate blockchain traits and primitives. mod backend; -mod header_metadata; mod error; +mod header_metadata; -pub use error::*; pub use backend::*; +pub use error::*; pub use header_metadata::*; diff --git a/substrate/primitives/consensus/aura/src/digests.rs b/substrate/primitives/consensus/aura/src/digests.rs index e93214eeb4bacc4ce5df4e3e7ff874931942eed6..eaa29036d98a1f1a578c6401c28846caa910b602 100644 --- a/substrate/primitives/consensus/aura/src/digests.rs +++ b/substrate/primitives/consensus/aura/src/digests.rs @@ -22,9 +22,9 @@ //! `CompatibleDigestItem` trait to appear in public interfaces. use crate::AURA_ENGINE_ID; -use sp_runtime::generic::DigestItem; +use codec::{Codec, Encode}; use sp_consensus_slots::Slot; -use codec::{Encode, Codec}; +use sp_runtime::generic::DigestItem; use sp_std::fmt::Debug; /// A digest item which is usable with aura consensus. @@ -42,9 +42,10 @@ pub trait CompatibleDigestItem: Sized { fn as_aura_pre_digest(&self) -> Option; } -impl CompatibleDigestItem for DigestItem where +impl CompatibleDigestItem for DigestItem +where Signature: Codec, - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static + Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { fn aura_seal(signature: Signature) -> Self { DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) diff --git a/substrate/primitives/consensus/aura/src/inherents.rs b/substrate/primitives/consensus/aura/src/inherents.rs index 294f544f6725a57e198b502133fada6e43227786..2a797b5d3f39387de2b5ae16ca521a618db1fb06 100644 --- a/substrate/primitives/consensus/aura/src/inherents.rs +++ b/substrate/primitives/consensus/aura/src/inherents.rs @@ -16,8 +16,7 @@ // limitations under the License. /// Contains the inherents for the AURA module - -use sp_inherents::{InherentIdentifier, InherentData, Error}; +use sp_inherents::{Error, InherentData, InherentIdentifier}; /// The Aura inherent identifier. pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"auraslot"; @@ -28,13 +27,13 @@ pub type InherentType = sp_consensus_slots::Slot; /// Auxiliary trait to extract Aura inherent data. pub trait AuraInherentData { /// Get aura inherent data. - fn aura_inherent_data(&self) ->Result, Error>; + fn aura_inherent_data(&self) -> Result, Error>; /// Replace aura inherent data. fn aura_replace_inherent_data(&mut self, new: InherentType); } impl AuraInherentData for InherentData { - fn aura_inherent_data(&self) ->Result, Error> { + fn aura_inherent_data(&self) -> Result, Error> { self.get_data(&INHERENT_IDENTIFIER) } @@ -54,9 +53,7 @@ pub struct InherentDataProvider { impl InherentDataProvider { /// Create a new instance with the given slot. 
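Returning to `header_metadata.rs`: `tree_route` above packs both fork arms into a single vector around a pivot, and the accessors are plain slices of it. A self-contained sketch of that layout, with `u64` block numbers standing in for the real `HashAndNumber` entries:

```rust
// Retracted blocks come first, the common ancestor sits at `pivot`,
// and the enacted blocks follow in increasing number.
struct TreeRoute {
    route: Vec<u64>,
    pivot: usize,
}

impl TreeRoute {
    fn retracted(&self) -> &[u64] {
        &self.route[..self.pivot]
    }
    fn common_block(&self) -> &u64 {
        &self.route[self.pivot]
    }
    fn enacted(&self) -> &[u64] {
        &self.route[self.pivot + 1..]
    }
}

fn main() {
    // Route from block #5 on a dead fork, down to ancestor #3, then up
    // the canonical chain to #6.
    let route = TreeRoute { route: vec![5, 4, 3, 4, 5, 6], pivot: 2 };
    assert_eq!(route.retracted(), &[5, 4]);
    assert_eq!(*route.common_block(), 3);
    assert_eq!(route.enacted(), &[4, 5, 6]);
}
```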
pub fn new(slot: InherentType) -> Self { - Self { - slot, - } + Self { slot } } /// Creates the inherent data provider by calculating the slot from the given @@ -65,13 +62,10 @@ impl InherentDataProvider { timestamp: sp_timestamp::Timestamp, duration: std::time::Duration, ) -> Self { - let slot = InherentType::from( - (timestamp.as_duration().as_millis() / duration.as_millis()) as u64 - ); + let slot = + InherentType::from((timestamp.as_duration().as_millis() / duration.as_millis()) as u64); - Self { - slot, - } + Self { slot } } } @@ -87,10 +81,7 @@ impl sp_std::ops::Deref for InherentDataProvider { #[cfg(feature = "std")] #[async_trait::async_trait] impl sp_inherents::InherentDataProvider for InherentDataProvider { - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) ->Result<(), Error> { + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { inherent_data.put_data(INHERENT_IDENTIFIER, &self.slot) } diff --git a/substrate/primitives/consensus/aura/src/lib.rs b/substrate/primitives/consensus/aura/src/lib.rs index a28e681fda27f8361d27603abaa47eb895b182fb..e6a319c1d1590f714b2fc11aa003e4a2a28b2b09 100644 --- a/substrate/primitives/consensus/aura/src/lib.rs +++ b/substrate/primitives/consensus/aura/src/lib.rs @@ -19,9 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode, Codec}; -use sp_std::vec::Vec; +use codec::{Codec, Decode, Encode}; use sp_runtime::ConsensusEngineId; +use sp_std::vec::Vec; pub mod digests; pub mod inherents; @@ -46,7 +46,7 @@ pub mod sr25519 { pub mod ed25519 { mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::AURA, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::AURA}; app_crypto!(ed25519, AURA); } diff --git a/substrate/primitives/consensus/babe/src/digests.rs b/substrate/primitives/consensus/babe/src/digests.rs index f34a38bc8b0166a9d8035f975197a699e83a2b3f..682894f5837b19193dfa03661cfe7c0cf3b002e6 100644 --- a/substrate/primitives/consensus/babe/src/digests.rs +++ b/substrate/primitives/consensus/babe/src/digests.rs @@ -22,8 +22,8 @@ use super::{ BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; use codec::{Codec, Decode, Encode}; -use sp_std::vec::Vec; use sp_runtime::{DigestItem, RuntimeDebug}; +use sp_std::vec::Vec; use sp_consensus_vrf::schnorrkel::{Randomness, VRFOutput, VRFProof}; @@ -143,14 +143,13 @@ pub enum NextConfigDescriptor { c: (u64, u64), /// Value of `allowed_slots` in `BabeEpochConfiguration`. 
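The timestamp-based constructor reflowed above (and its BABE counterpart below) derives the slot by integer division of the Unix-epoch timestamp by the slot duration, both in milliseconds. A standalone sketch of that arithmetic:

```rust
// Mirrors the slot calculation in the Aura and BABE inherent data
// providers: the current slot is the number of whole slot durations
// elapsed since the Unix epoch.
fn slot_from_timestamp(timestamp_millis: u64, slot_duration_millis: u64) -> u64 {
    timestamp_millis / slot_duration_millis
}

fn main() {
    // With 6-second slots, both 12_000ms and 17_999ms fall in slot 2.
    assert_eq!(slot_from_timestamp(12_000, 6_000), 2);
    assert_eq!(slot_from_timestamp(17_999, 6_000), 2);
}
```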
allowed_slots: AllowedSlots, - } + }, } impl From for BabeEpochConfiguration { fn from(desc: NextConfigDescriptor) -> Self { match desc { - NextConfigDescriptor::V1 { c, allowed_slots } => - Self { c, allowed_slots }, + NextConfigDescriptor::V1 { c, allowed_slots } => Self { c, allowed_slots }, } } } @@ -176,8 +175,9 @@ pub trait CompatibleDigestItem: Sized { fn as_next_config_descriptor(&self) -> Option; } -impl CompatibleDigestItem for DigestItem where - Hash: Send + Sync + Eq + Clone + Codec + 'static +impl CompatibleDigestItem for DigestItem +where + Hash: Send + Sync + Eq + Clone + Codec + 'static, { fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) diff --git a/substrate/primitives/consensus/babe/src/inherents.rs b/substrate/primitives/consensus/babe/src/inherents.rs index e160ca8644bc2ac2c2997b466fc05bb8bc2d12f3..cecd61998a4dbd8f7c2b9795a8b3d3be8e2ca36a 100644 --- a/substrate/primitives/consensus/babe/src/inherents.rs +++ b/substrate/primitives/consensus/babe/src/inherents.rs @@ -17,7 +17,7 @@ //! Inherents for BABE -use sp_inherents::{InherentData, InherentIdentifier, Error}; +use sp_inherents::{Error, InherentData, InherentIdentifier}; use sp_std::result::Result; @@ -64,13 +64,10 @@ impl InherentDataProvider { timestamp: sp_timestamp::Timestamp, duration: std::time::Duration, ) -> Self { - let slot = InherentType::from( - (timestamp.as_duration().as_millis() / duration.as_millis()) as u64 - ); + let slot = + InherentType::from((timestamp.as_duration().as_millis() / duration.as_millis()) as u64); - Self { - slot, - } + Self { slot } } /// Returns the `slot` of this inherent data provider. diff --git a/substrate/primitives/consensus/babe/src/lib.rs b/substrate/primitives/consensus/babe/src/lib.rs index 3609a0b8ce32cf690d6122c7049c1822c818e77a..3f2fc7e1f5e6ac2fcce65071bbf47250f3e88138 100644 --- a/substrate/primitives/consensus/babe/src/lib.rs +++ b/substrate/primitives/consensus/babe/src/lib.rs @@ -30,7 +30,7 @@ pub use sp_consensus_vrf::schnorrkel::{ use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use sp_keystore::vrf::{VRFTranscriptData, VRFTranscriptValue}; use sp_runtime::{traits::Header, ConsensusEngineId, RuntimeDebug}; @@ -96,11 +96,7 @@ pub type BabeAuthorityWeight = u64; pub type BabeBlockWeight = u32; /// Make a VRF transcript from given randomness, slot number and epoch. 
-pub fn make_transcript( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> Transcript { +pub fn make_transcript(randomness: &Randomness, slot: Slot, epoch: u64) -> Transcript { let mut transcript = Transcript::new(&BABE_ENGINE_ID); transcript.append_u64(b"slot number", *slot); transcript.append_u64(b"current epoch", epoch); @@ -110,18 +106,14 @@ pub fn make_transcript( /// Make a VRF transcript data container #[cfg(feature = "std")] -pub fn make_transcript_data( - randomness: &Randomness, - slot: Slot, - epoch: u64, -) -> VRFTranscriptData { +pub fn make_transcript_data(randomness: &Randomness, slot: Slot, epoch: u64) -> VRFTranscriptData { VRFTranscriptData { label: &BABE_ENGINE_ID, items: vec![ ("slot number", VRFTranscriptValue::U64(*slot)), ("current epoch", VRFTranscriptValue::U64(epoch)), ("chain randomness", VRFTranscriptValue::Bytes(randomness.to_vec())), - ] + ], } } @@ -280,20 +272,15 @@ where use digests::*; use sp_application_crypto::RuntimeAppPublic; - let find_pre_digest = |header: &H| { - header - .digest() - .logs() - .iter() - .find_map(|log| log.as_babe_pre_digest()) - }; + let find_pre_digest = + |header: &H| header.digest().logs().iter().find_map(|log| log.as_babe_pre_digest()); let verify_seal_signature = |mut header: H, offender: &AuthorityId| { let seal = header.digest_mut().pop()?.as_babe_seal()?; let pre_hash = header.hash(); if !offender.verify(&pre_hash.as_ref(), &seal) { - return None; + return None } Some(()) @@ -302,7 +289,7 @@ where let verify_proof = || { // we must have different headers for the equivocation to be valid if proof.first_header.hash() == proof.second_header.hash() { - return None; + return None } let first_pre_digest = find_pre_digest(&proof.first_header)?; @@ -313,12 +300,12 @@ where if proof.slot != first_pre_digest.slot() || first_pre_digest.slot() != second_pre_digest.slot() { - return None; + return None } // both headers must have been authored by the same authority if first_pre_digest.authority_index() != second_pre_digest.authority_index() { - return None; + return None } // we finally verify that the expected authority has signed both headers and diff --git a/substrate/primitives/consensus/common/src/block_import.rs b/substrate/primitives/consensus/common/src/block_import.rs index a444e15095ef6d8c6449c949875a368654fa904f..c742e24a0cc0143759efd6de6174100e326f6d80 100644 --- a/substrate/primitives/consensus/common/src/block_import.rs +++ b/substrate/primitives/consensus/common/src/block_import.rs @@ -17,16 +17,14 @@ //! Block import helpers. -use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HashFor}; -use sp_runtime::{Justification, Justifications}; -use serde::{Serialize, Deserialize}; -use std::borrow::Cow; -use std::collections::HashMap; -use std::sync::Arc; -use std::any::Any; +use serde::{Deserialize, Serialize}; +use sp_runtime::{ + traits::{Block as BlockT, DigestItemFor, HashFor, Header as HeaderT, NumberFor}, + Justification, Justifications, +}; +use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc}; -use crate::Error; -use crate::import_queue::CacheKeyId; +use crate::{import_queue::CacheKeyId, Error}; /// Block import result. 
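The equivocation checks reformatted above boil down to three structural conditions before any signature work: distinct headers, equal slots, equal authority indices. A condensed sketch with plain types standing in for headers (seal-signature verification, the final step, is omitted):

```rust
struct Claim {
    hash: u64,
    slot: u64,
    authority_index: u32,
}

// The two headers must differ, claim the same slot, and name the same
// authority index for the proof to be structurally valid.
fn is_structurally_valid(first: &Claim, second: &Claim) -> bool {
    first.hash != second.hash
        && first.slot == second.slot
        && first.authority_index == second.authority_index
}

fn main() {
    let a = Claim { hash: 1, slot: 7, authority_index: 0 };
    let b = Claim { hash: 2, slot: 7, authority_index: 0 };
    assert!(is_structurally_valid(&a, &b));
    assert!(!is_structurally_valid(&a, &a)); // identical headers
}
```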
#[derive(Debug, PartialEq, Eq)] @@ -88,8 +86,8 @@ impl ImportResult { if aux.needs_justification { justification_sync_link.request_justification(hash, number); } - } - _ => {} + }, + _ => {}, } } } @@ -154,9 +152,7 @@ pub struct ImportedState { impl std::fmt::Debug for ImportedState { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("ImportedState") - .field("block", &self.block) - .finish() + fmt.debug_struct("ImportedState").field("block", &self.block).finish() } } @@ -226,12 +222,10 @@ pub struct BlockImportParams { impl BlockImportParams { /// Create a new block import params. - pub fn new( - origin: BlockOrigin, - header: Block::Header, - ) -> Self { + pub fn new(origin: BlockOrigin, header: Block::Header) -> Self { Self { - origin, header, + origin, + header, justifications: None, post_digests: Vec::new(), body: None, @@ -273,7 +267,9 @@ impl BlockImportParams { /// /// Actually this just sets `StorageChanges::Changes` to `None` and makes rustc think that `Self` now /// uses a different transaction type. - pub fn clear_storage_changes_and_mutate(self) -> BlockImportParams { + pub fn clear_storage_changes_and_mutate( + self, + ) -> BlockImportParams { // Preserve imported state. let state_action = match self.state_action { StateAction::ApplyChanges(StorageChanges::Import(state)) => @@ -305,14 +301,15 @@ impl BlockImportParams { let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; v.downcast::().or_else(|v| { - self.intermediates.insert(k, v); - Err(Error::InvalidIntermediate) + self.intermediates.insert(k, v); + Err(Error::InvalidIntermediate) }) } /// Get a reference to a given intermediate. pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { - self.intermediates.get(key) + self.intermediates + .get(key) .ok_or(Error::NoIntermediate)? .downcast_ref::() .ok_or(Error::InvalidIntermediate) @@ -320,7 +317,8 @@ impl BlockImportParams { /// Get a mutable reference to a given intermediate. pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { - self.intermediates.get_mut(key) + self.intermediates + .get_mut(key) .ok_or(Error::NoIntermediate)? .downcast_mut::() .ok_or(Error::InvalidIntermediate) @@ -353,8 +351,8 @@ pub trait BlockImport { #[async_trait::async_trait] impl BlockImport for crate::import_queue::BoxBlockImport - where - Transaction: Send + 'static, +where + Transaction: Send + 'static, { type Error = crate::error::Error; type Transaction = Transaction; @@ -381,10 +379,10 @@ impl BlockImport for crate::import_queue::BoxBlockImp #[async_trait::async_trait] impl BlockImport for Arc - where - for<'r> &'r T: BlockImport, - T: Send + Sync, - Transaction: Send + 'static, +where + for<'r> &'r T: BlockImport, + T: Send + Sync, + Transaction: Send + 'static, { type Error = E; type Transaction = Transaction; diff --git a/substrate/primitives/consensus/common/src/block_validation.rs b/substrate/primitives/consensus/common/src/block_validation.rs index fb0846fe9901a28c020ea1c349aecf5e6892fe65..9a9f21394f9ab5d717ffa427de7d838b72680437 100644 --- a/substrate/primitives/consensus/common/src/block_validation.rs +++ b/substrate/primitives/consensus/common/src/block_validation.rs @@ -18,9 +18,9 @@ //! Block announcement validation. use crate::BlockStatus; +use futures::FutureExt as _; use sp_runtime::{generic::BlockId, traits::Block}; use std::{error::Error, future::Future, pin::Pin, sync::Arc}; -use futures::FutureExt as _; /// A type which provides access to chain information. 
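The `intermediates` accessors re-indented above implement a type-erased map: values go in as `Box<dyn Any>` under byte keys and come back out via downcasts. A simplified, std-only sketch of the same pattern (the real map in `BlockImportParams` also demands `Send`):

```rust
use std::{any::Any, collections::HashMap};

fn main() {
    let mut intermediates: HashMap<Vec<u8>, Box<dyn Any>> = HashMap::new();
    intermediates.insert(b"epoch".to_vec(), Box::new(42u64));

    // Recover the value by downcasting to the expected concrete type.
    let value = intermediates
        .get(b"epoch".as_slice())
        .and_then(|v| v.downcast_ref::<u64>());
    assert_eq!(value, Some(&42));

    // A wrong type yields `None` rather than a panic, which is what the
    // `Error::InvalidIntermediate` branch maps to.
    assert!(intermediates[b"epoch".as_slice()].downcast_ref::<u32>().is_none());
}
```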
pub trait Chain { @@ -92,6 +92,7 @@ impl BlockAnnounceValidator for DefaultBlockAnnounceValidator { } else { Ok(Validation::Success { is_new_best: false }) } - }.boxed() + } + .boxed() } } diff --git a/substrate/primitives/consensus/common/src/error.rs b/substrate/primitives/consensus/common/src/error.rs index d7461fe92032e5129c6cc79d963363485ff1aa85..546f30d3e820212b6b8845c7f72629dfc4720cc5 100644 --- a/substrate/primitives/consensus/common/src/error.rs +++ b/substrate/primitives/consensus/common/src/error.rs @@ -16,8 +16,8 @@ // limitations under the License. //! Error types in Consensus -use sp_version::RuntimeVersion; use sp_core::ed25519::Public; +use sp_version::RuntimeVersion; use std::error; /// Result type alias. @@ -58,8 +58,10 @@ pub enum Error { #[error("Message sender {0:?} is not a valid authority")] InvalidAuthority(Public), /// Authoring interface does not match the runtime. - #[error("Authoring for current \ - runtime is not supported. Native ({native}) cannot author for on-chain ({on_chain}).")] + #[error( + "Authoring for current \ + runtime is not supported. Native ({native}) cannot author for on-chain ({on_chain})." + )] IncompatibleAuthoringRuntime { native: RuntimeVersion, on_chain: RuntimeVersion }, /// Authoring interface does not match the runtime. #[error("Authoring for current runtime is not supported since it has no version.")] @@ -81,7 +83,7 @@ pub enum Error { ChainLookup(String), /// Signing failed #[error("Failed to sign using key: {0:?}. Reason: {1}")] - CannotSign(Vec, String) + CannotSign(Vec, String), } impl core::convert::From for Error { diff --git a/substrate/primitives/consensus/common/src/evaluation.rs b/substrate/primitives/consensus/common/src/evaluation.rs index c18c8b127f991b2d6a1f0dfcbf18bfa865083a94..19be5e552634938f7894a0a6f0eba69139e32c9c 100644 --- a/substrate/primitives/consensus/common/src/evaluation.rs +++ b/substrate/primitives/consensus/common/src/evaluation.rs @@ -18,7 +18,7 @@ //! Block evaluation and evaluation errors. use codec::Encode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, One, CheckedConversion}; +use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One}; // This is just a best effort to encode the number. None indicated that it's too big to encode // in a u128. 
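The `.boxed()` call in `block_validation.rs` above is the usual trick for fitting an `async` block to a trait's pinned-box return type. A minimal sketch with simplified types, assuming the `futures` crate:

```rust
use futures::{future::BoxFuture, FutureExt as _};

// `.boxed()` erases the future's concrete type into a `BoxFuture`,
// which is how `DefaultBlockAnnounceValidator` satisfies the trait.
fn validate_announce() -> BoxFuture<'static, Result<(), String>> {
    async { Ok(()) }.boxed()
}

fn main() {
    futures::executor::block_on(async {
        assert!(validate_announce().await.is_ok());
    });
}
```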
@@ -48,15 +48,13 @@ pub fn evaluate_initial( parent_hash: &::Hash, parent_number: <::Header as HeaderT>::Number, ) -> Result<()> { - let encoded = Encode::encode(proposal); - let proposal = Block::decode(&mut &encoded[..]) - .map_err(|e| Error::BadProposalFormat(e))?; + let proposal = Block::decode(&mut &encoded[..]).map_err(|e| Error::BadProposalFormat(e))?; if *parent_hash != *proposal.header().parent_hash() { return Err(Error::WrongParentHash { expected: format!("{:?}", *parent_hash), - got: format!("{:?}", proposal.header().parent_hash()) + got: format!("{:?}", proposal.header().parent_hash()), }) } diff --git a/substrate/primitives/consensus/common/src/import_queue.rs b/substrate/primitives/consensus/common/src/import_queue.rs index 6cac6b1ff9201be573294e48131ba65759b82d3a..6eb8d0a750a245e3d1987074228d5c6b3d10caa7 100644 --- a/substrate/primitives/consensus/common/src/import_queue.rs +++ b/substrate/primitives/consensus/common/src/import_queue.rs @@ -28,14 +28,17 @@ use std::collections::HashMap; -use sp_runtime::{Justifications, traits::{Block as BlockT, Header as _, NumberFor}}; +use sp_runtime::{ + traits::{Block as BlockT, Header as _, NumberFor}, + Justifications, +}; use crate::{ - error::Error as ConsensusError, block_import::{ - BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, ImportedState, StateAction, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ImportResult, ImportedAux, + ImportedState, JustificationImport, StateAction, }, + error::Error as ConsensusError, metrics::Metrics, }; pub use basic_queue::BasicQueue; @@ -43,18 +46,19 @@ pub use basic_queue::BasicQueue; /// A commonly-used Import Queue type. /// /// This defines the transaction type of the `BasicQueue` to be the transaction type for a client. -pub type DefaultImportQueue = BasicQueue>; +pub type DefaultImportQueue = + BasicQueue>; mod basic_queue; pub mod buffered_link; /// Shared block import struct used by the queue. -pub type BoxBlockImport = Box< - dyn BlockImport + Send + Sync ->; +pub type BoxBlockImport = + Box + Send + Sync>; /// Shared justification import struct used by the queue. -pub type BoxJustificationImport = Box + Send + Sync>; +pub type BoxJustificationImport = + Box + Send + Sync>; /// Maps to the Origin used by the network. pub type Origin = libp2p::PeerId; @@ -115,7 +119,7 @@ pub trait ImportQueue: Send { who: Origin, hash: B::Hash, number: NumberFor, - justifications: Justifications + justifications: Justifications, ); /// Polls for actions to perform on the network. /// @@ -133,10 +137,18 @@ pub trait Link: Send { &mut self, _imported: usize, _count: usize, - _results: Vec<(Result>, BlockImportError>, B::Hash)> - ) {} + _results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) { + } /// Justification import result. - fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} + fn justification_imported( + &mut self, + _who: Origin, + _hash: &B::Hash, + _number: NumberFor, + _success: bool, + ) { + } /// Request a justification for the given block. fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} } @@ -180,7 +192,11 @@ pub async fn import_single_block, Transaction: Send + } /// Single block import function with metering. 
-pub(crate) async fn import_single_block_metered, Transaction: Send + 'static>( +pub(crate) async fn import_single_block_metered< + B: BlockT, + V: Verifier, + Transaction: Send + 'static, +>( import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, @@ -207,60 +223,61 @@ pub(crate) async fn import_single_block_metered, Trans let hash = header.hash(); let parent_hash = header.parent_hash().clone(); - let import_handler = |import| { - match import { - Ok(ImportResult::AlreadyInChain) => { - trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number, peer.clone())) - }, - Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), - Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::MissingState) - }, - Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::UnknownParent) - }, - Ok(ImportResult::KnownBad) => { - debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer.clone())) - }, - Err(e) => { - debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); - Err(BlockImportError::Other(e)) - } - } + let import_handler = |import| match import { + Ok(ImportResult::AlreadyInChain) => { + trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); + Ok(BlockImportResult::ImportedKnown(number, peer.clone())) + }, + Ok(ImportResult::Imported(aux)) => + Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), + Ok(ImportResult::MissingState) => { + debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); + Err(BlockImportError::MissingState) + }, + Ok(ImportResult::UnknownParent) => { + debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); + Err(BlockImportError::UnknownParent) + }, + Ok(ImportResult::KnownBad) => { + debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); + Err(BlockImportError::BadBlock(peer.clone())) + }, + Err(e) => { + debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); + Err(BlockImportError::Other(e)) + }, }; - match import_handler(import_handle.check_block(BlockCheckParams { - hash, - number, - parent_hash, - allow_missing_state: block.allow_missing_state, - import_existing: block.import_existing, - }).await)? { + match import_handler( + import_handle + .check_block(BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state: block.allow_missing_state, + import_existing: block.import_existing, + }) + .await, + )? { BlockImportResult::ImportedUnknown { .. } => (), r => return Ok(r), // Any other successful result means that the block is already imported. 
} let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier.verify( - block_origin, - header, - justifications, - block.body - ).await.map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; + let (mut import_block, maybe_keys) = verifier + .verify(block_origin, header, justifications, block.body) + .await + .map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; if let Some(metrics) = metrics.as_ref() { metrics.report_verification(true, started.elapsed()); diff --git a/substrate/primitives/consensus/common/src/import_queue/basic_queue.rs b/substrate/primitives/consensus/common/src/import_queue/basic_queue.rs index 8dd40d84df305b9417dd9e16888f1407768761ae..2610a92ad83e464a2e7244b8db94ce434f2f2f14 100644 --- a/substrate/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -15,20 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::{pin::Pin, time::Duration, marker::PhantomData}; -use futures::{prelude::*, task::Context, task::Poll}; +use futures::{ + prelude::*, + task::{Context, Poll}, +}; use futures_timer::Delay; -use sp_runtime::{Justification, Justifications, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; -use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded, TracingUnboundedReceiver}; use prometheus_endpoint::Registry; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justification, Justifications, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{marker::PhantomData, pin::Pin, time::Duration}; use crate::{ block_import::BlockOrigin, import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, - BoxJustificationImport, ImportQueue, Link, Origin, - IncomingBlock, import_single_block_metered, - buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver}, + buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, + import_single_block_metered, BlockImportError, BlockImportResult, BoxBlockImport, + BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, Verifier, }, metrics::Metrics, }; @@ -85,24 +90,20 @@ impl BasicQueue { spawner.spawn_essential_blocking("basic-block-import-worker", future.boxed()); - Self { - justification_sender, - block_import_sender, - result_port, - _phantom: PhantomData, - } + Self { justification_sender, block_import_sender, result_port, _phantom: PhantomData } } } impl ImportQueue for BasicQueue { fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { if blocks.is_empty() { - return; + return } trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); - let res = - 
self.block_import_sender.unbounded_send(worker_messages::ImportBlocks(origin, blocks)); + let res = self + .block_import_sender + .unbounded_send(worker_messages::ImportBlocks(origin, blocks)); if res.is_err() { log::error!( @@ -145,7 +146,12 @@ mod worker_messages { use super::*; pub struct ImportBlocks(pub BlockOrigin, pub Vec>); - pub struct ImportJustification(pub Origin, pub B::Hash, pub NumberFor, pub Justification); + pub struct ImportJustification( + pub Origin, + pub B::Hash, + pub NumberFor, + pub Justification, + ); } /// The process of importing blocks. @@ -164,7 +170,8 @@ async fn block_import_process( delay_between_blocks: Duration, ) { loop { - let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await { + let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await + { Some(blocks) => blocks, None => { log::debug!( @@ -182,7 +189,8 @@ async fn block_import_process( &mut verifier, delay_between_blocks, metrics.clone(), - ).await; + ) + .await; result_sender.blocks_processed(res.imported, res.block_count, res.results); } @@ -214,11 +222,7 @@ impl BlockImportWorker { let (block_import_sender, block_import_port) = tracing_unbounded("mpsc_import_queue_worker_blocks"); - let mut worker = BlockImportWorker { - result_sender, - justification_import, - metrics, - }; + let mut worker = BlockImportWorker { result_sender, justification_import, metrics }; let delay_between_blocks = Duration::default(); @@ -248,29 +252,26 @@ impl BlockImportWorker { target: "block-import", "Stopping block import because result channel was closed!", ); - return; + return } // Make sure to first process all justifications while let Poll::Ready(justification) = futures::poll!(justification_port.next()) { match justification { - Some(ImportJustification(who, hash, number, justification)) => { - worker - .import_justification(who, hash, number, justification) - .await - } + Some(ImportJustification(who, hash, number, justification)) => + worker.import_justification(who, hash, number, justification).await, None => { log::debug!( target: "block-import", "Stopping block import because justification channel was closed!", ); - return; - } + return + }, } } if let Poll::Ready(()) = futures::poll!(&mut block_import_process) { - return; + return } // All futures that we polled are now pending. 
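Every method on `Link` has a no-op default, so an observer overrides only what it cares about. A minimal sketch of an implementation, assuming this revision's `sp-consensus` paths and a `log` dependency:

```rust
use sp_consensus::import_queue::{BlockImportError, BlockImportResult, Link};
use sp_runtime::traits::{Block as BlockT, NumberFor};

struct LoggingLink;

impl<B: BlockT> Link<B> for LoggingLink {
    // Matches the `blocks_processed` signature reflowed above; the
    // justification methods keep their no-op defaults.
    fn blocks_processed(
        &mut self,
        imported: usize,
        count: usize,
        _results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>,
    ) {
        log::info!("imported {}/{} blocks", imported, count);
    }
}
```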
@@ -310,13 +311,10 @@ impl BlockImportWorker { }; if let Some(metrics) = self.metrics.as_ref() { - metrics - .justification_import_time - .observe(started.elapsed().as_secs_f64()); + metrics.justification_import_time.observe(started.elapsed().as_secs_f64()); } - self.result_sender - .justification_imported(who, &hash, number, success); + self.result_sender.justification_imported(who, &hash, number, success); } } @@ -382,7 +380,8 @@ async fn import_many_blocks, Transaction: Send + 'stat block, verifier, metrics.clone(), - ).await + ) + .await }; if let Some(metrics) = metrics.as_ref() { @@ -604,7 +603,7 @@ mod tests { block_on(futures::future::poll_fn(|cx| { while link.events.len() < 9 { match Future::poll(Pin::new(&mut worker), cx) { - Poll::Pending => {} + Poll::Pending => {}, Poll::Ready(()) => panic!("import queue worker should not conclude."), } diff --git a/substrate/primitives/consensus/common/src/import_queue/buffered_link.rs b/substrate/primitives/consensus/common/src/import_queue/buffered_link.rs index 0295f704c4efcfa094b4a5c9101fc8776606ee08..8d146dfbe461fb549bc1822267f916ce27398545 100644 --- a/substrate/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/substrate/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -36,13 +36,15 @@ //! std::task::Poll::Pending::<()> //! }); //! ``` -//! +use crate::import_queue::{BlockImportError, BlockImportResult, Link, Origin}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; -use std::{pin::Pin, task::Context, task::Poll}; -use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; /// Wraps around an unbounded channel from the `futures` crate. The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer @@ -70,15 +72,17 @@ impl BufferedLinkSender { impl Clone for BufferedLinkSender { fn clone(&self) -> Self { - BufferedLinkSender { - tx: self.tx.clone(), - } + BufferedLinkSender { tx: self.tx.clone() } } } /// Internal buffered message. 
 enum BlockImportWorkerMsg<B: BlockT> {
-	BlocksProcessed(usize, usize, Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>),
+	BlocksProcessed(
+		usize,
+		usize,
+		Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>,
+	),
 	JustificationImported(Origin, B::Hash, NumberFor<B>, bool),
 	RequestJustification(B::Hash, NumberFor<B>),
 }
@@ -88,9 +92,11 @@ impl<B: BlockT> Link<B> for BufferedLinkSender<B> {
 		&mut self,
 		imported: usize,
 		count: usize,
-		results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
+		results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>,
 	) {
-		let _ = self.tx.unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results));
+		let _ = self
+			.tx
+			.unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results));
 	}
 
 	fn justification_imported(
@@ -98,14 +104,16 @@ impl<B: BlockT> Link<B> for BufferedLinkSender<B> {
 		who: Origin,
 		hash: &B::Hash,
 		number: NumberFor<B>,
-		success: bool
+		success: bool,
 	) {
 		let msg = BlockImportWorkerMsg::JustificationImported(who, hash.clone(), number, success);
 		let _ = self.tx.unbounded_send(msg);
 	}
 
 	fn request_justification(&mut self, hash: &B::Hash, number: NumberFor<B>) {
-		let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number));
+		let _ = self
+			.tx
+			.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number));
 	}
 }
diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs
index 51b2a96e17758a6621a8cd14b4e33daa0e2c620c..eb524422a6e250c94c4127a79a1bd8a4a51f5620 100644
--- a/substrate/primitives/consensus/common/src/lib.rs
+++ b/substrate/primitives/consensus/common/src/lib.rs
@@ -23,28 +23,28 @@
 
 // This provides "unused" building blocks to other crates
 #![allow(dead_code)]
-
 // our error-chain could potentially blow up otherwise
-#![recursion_limit="128"]
+#![recursion_limit = "128"]
 
-#[macro_use] extern crate log;
+#[macro_use]
+extern crate log;
 
-use std::sync::Arc;
-use std::time::Duration;
+use std::{sync::Arc, time::Duration};
 
+use futures::prelude::*;
 use sp_runtime::{
-	generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HashFor},
+	generic::BlockId,
+	traits::{Block as BlockT, DigestFor, HashFor, NumberFor},
 };
-use futures::prelude::*;
 use sp_state_machine::StorageProof;
 
+pub mod block_import;
 pub mod block_validation;
 pub mod error;
-pub mod block_import;
-mod select_chain;
-pub mod import_queue;
 pub mod evaluation;
+pub mod import_queue;
 mod metrics;
+mod select_chain;
 
 pub use self::error::Error;
 pub use block_import::{
@@ -52,10 +52,10 @@ pub use block_import::{
 	ImportResult, ImportedAux, ImportedState, JustificationImport, JustificationSyncLink,
 	StateAction, StorageChanges,
 };
-pub use select_chain::SelectChain;
-pub use sp_state_machine::Backend as StateBackend;
 pub use import_queue::DefaultImportQueue;
+pub use select_chain::SelectChain;
 pub use sp_inherents::InherentData;
+pub use sp_state_machine::Backend as StateBackend;
 
 /// Block status.
 #[derive(Debug, PartialEq, Eq)]
@@ -80,7 +82,9 @@ pub trait Environment<B: BlockT> {
 	type Proposer: Proposer<B> + Send + 'static;
 	/// A future that resolves to the proposer.
 	type CreateProposer: Future<Output = Result<Self::Proposer, Self::Error>>
-		+ Send + Unpin + 'static;
+		+ Send
+		+ Unpin
+		+ 'static;
 	/// Error which can occur upon creation.
 	type Error: From<Error> + std::fmt::Debug + 'static;
 
@@ -96,7 +98,8 @@ pub struct Proposal<Block: BlockT, Transaction, Proof> {
 	/// Proof that was recorded while building the block.
 	pub proof: Proof,
 	/// The storage changes while building this block.
-	pub storage_changes: sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>,
+	pub storage_changes:
+		sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>,
 }
 
 /// Error that is returned when [`ProofRecording`] requested to record a proof,
@@ -179,8 +182,7 @@ pub trait Proposer<B: BlockT> {
 	/// The transaction type used by the backend.
 	type Transaction: Default + Send + 'static;
 	/// Future that resolves to a committed proposal with an optional proof.
-	type Proposal:
-		Future<Output = Result<Proposal<B, Self::Transaction, Self::Proof>, Self::Error>>
+	type Proposal: Future<Output = Result<Proposal<B, Self::Transaction, Self::Proof>, Self::Error>>
 		+ Send
 		+ Unpin
 		+ 'static;
@@ -233,11 +235,19 @@ pub trait SyncOracle {
 pub struct NoNetwork;
 
 impl SyncOracle for NoNetwork {
-	fn is_major_syncing(&mut self) -> bool { false }
-	fn is_offline(&mut self) -> bool { false }
+	fn is_major_syncing(&mut self) -> bool {
+		false
+	}
+	fn is_offline(&mut self) -> bool {
+		false
+	}
 }
 
-impl<T> SyncOracle for Arc<T> where T: ?Sized, for<'r> &'r T: SyncOracle {
+impl<T> SyncOracle for Arc<T>
+where
+	T: ?Sized,
+	for<'r> &'r T: SyncOracle,
+{
 	fn is_major_syncing(&mut self) -> bool {
 		<&T>::is_major_syncing(&mut &**self)
 	}
@@ -277,13 +287,10 @@ impl<T: sp_version::GetNativeVersion + sp_version::GetRuntimeVersionAt<Block>, Block: BlockT> CanAuthorWith<Bl
 	fn can_author_with(&self, at: &BlockId<Block>) -> Result<(), String> {
 		match self.0.runtime_version(at) {
 			Ok(version) => self.0.native_version().can_author_with(&version),
-			Err(e) => {
-				Err(format!(
-					"Failed to get runtime version at `{}` and will disable authoring. Error: {}",
-					at,
-					e,
-				))
-			}
+			Err(e) => Err(format!(
+				"Failed to get runtime version at `{}` and will disable authoring. Error: {}",
+				at, e,
+			)),
 		}
 	}
 }
diff --git a/substrate/primitives/consensus/common/src/metrics.rs b/substrate/primitives/consensus/common/src/metrics.rs
index 29d39436cbefc0c1a2efb2e78038a5eee9b5783d..c56f68625b6a0aa932fd3679a6c5a0c9762281fb 100644
--- a/substrate/primitives/consensus/common/src/metrics.rs
+++ b/substrate/primitives/consensus/common/src/metrics.rs
@@ -18,12 +18,13 @@
 
 //! Metering tools for consensus
 
 use prometheus_endpoint::{
-	register, U64, Registry, PrometheusError, Opts, CounterVec, Histogram, HistogramVec, HistogramOpts
+	register, CounterVec, Histogram, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry,
+	U64,
 };
 
 use sp_runtime::traits::{Block as BlockT, NumberFor};
 
-use crate::import_queue::{BlockImportResult, BlockImportError};
+use crate::import_queue::{BlockImportError, BlockImportResult};
 
 /// Generic Prometheus metrics for common consensus functionality.
#[derive(Clone)] @@ -40,36 +41,29 @@ impl Metrics { import_queue_processed: register( CounterVec::new( Opts::new("import_queue_processed_total", "Blocks processed by import queue"), - &["result"] // 'success or failure + &["result"], // 'success or failure )?, registry, )?, block_verification_time: register( HistogramVec::new( - HistogramOpts::new( - "block_verification_time", - "Time taken to verify blocks", - ), + HistogramOpts::new("block_verification_time", "Time taken to verify blocks"), &["result"], )?, registry, )?, block_verification_and_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "block_verification_and_import_time", - "Time taken to verify and import blocks", - ), - )?, + Histogram::with_opts(HistogramOpts::new( + "block_verification_and_import_time", + "Time taken to verify and import blocks", + ))?, registry, )?, justification_import_time: register( - Histogram::with_opts( - HistogramOpts::new( - "justification_import_time", - "Time taken to import justifications", - ), - )?, + Histogram::with_opts(HistogramOpts::new( + "justification_import_time", + "Time taken to import justifications", + ))?, registry, )?, }) @@ -82,7 +76,7 @@ impl Metrics { let label = match result { Ok(_) => "success", Err(BlockImportError::IncompleteHeader(_)) => "incomplete_header", - Err(BlockImportError::VerificationFailed(_,_)) => "verification_failed", + Err(BlockImportError::VerificationFailed(_, _)) => "verification_failed", Err(BlockImportError::BadBlock(_)) => "bad_block", Err(BlockImportError::MissingState) => "missing_state", Err(BlockImportError::UnknownParent) => "unknown_parent", @@ -90,15 +84,13 @@ impl Metrics { Err(BlockImportError::Other(_)) => "failed", }; - self.import_queue_processed.with_label_values( - &[label] - ).inc(); + self.import_queue_processed.with_label_values(&[label]).inc(); } pub fn report_verification(&self, success: bool, time: std::time::Duration) { - self.block_verification_time.with_label_values( - &[if success { "success" } else { "verification_failed" }] - ).observe(time.as_secs_f64()); + self.block_verification_time + .with_label_values(&[if success { "success" } else { "verification_failed" }]) + .observe(time.as_secs_f64()); } pub fn report_verification_and_import(&self, time: std::time::Duration) { diff --git a/substrate/primitives/consensus/common/src/select_chain.rs b/substrate/primitives/consensus/common/src/select_chain.rs index e99a6756175d2759d7ce4ffe8cdc510a4adec151..5408fc86b7bd451b0018f183f976bebaa7e8004b 100644 --- a/substrate/primitives/consensus/common/src/select_chain.rs +++ b/substrate/primitives/consensus/common/src/select_chain.rs @@ -18,7 +18,6 @@ use crate::error::Error; use sp_runtime::traits::{Block as BlockT, NumberFor}; - /// The SelectChain trait defines the strategy upon which the head is chosen /// if multiple forks are present for an opaque definition of "best" in the /// specific chain build. diff --git a/substrate/primitives/consensus/pow/src/lib.rs b/substrate/primitives/consensus/pow/src/lib.rs index 12d3440ea9d54c9318fe4d6b831597e6e04d4dd0..ac8bc589c136f00a7f71ae9eb33c2d27ef69237a 100644 --- a/substrate/primitives/consensus/pow/src/lib.rs +++ b/substrate/primitives/consensus/pow/src/lib.rs @@ -19,9 +19,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::vec::Vec; -use sp_runtime::ConsensusEngineId; use codec::Decode; +use sp_runtime::ConsensusEngineId; +use sp_std::vec::Vec; /// The `ConsensusEngineId` of PoW. 
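For orientation, the counters and histograms registered above are fed through the small reporting helpers at the end of that hunk. A hypothetical call site (only `Metrics` and `report_verification` come from the diff; `verify_and_report` is illustrative):

use std::time::Instant;

fn verify_and_report(metrics: Option<&Metrics>) {
	let started = Instant::now();
	let success = true; // outcome of the (elided) verification step
	if let Some(metrics) = metrics {
		// Observes `block_verification_time` under a success/failure label.
		metrics.report_verification(success, started.elapsed());
	}
}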
pub const POW_ENGINE_ID: ConsensusEngineId = [b'p', b'o', b'w', b'_']; diff --git a/substrate/primitives/consensus/vrf/src/schnorrkel.rs b/substrate/primitives/consensus/vrf/src/schnorrkel.rs index 400bdb2f5808853293b69d8b25383f5527cc86de..687e0bd2318205e36c371898a2a260c8d288aac7 100644 --- a/substrate/primitives/consensus/vrf/src/schnorrkel.rs +++ b/substrate/primitives/consensus/vrf/src/schnorrkel.rs @@ -17,13 +17,19 @@ //! Schnorrkel-based VRF. -use codec::{Encode, Decode, EncodeLike}; -use sp_std::{convert::TryFrom, prelude::*}; -use sp_core::U512; -use sp_std::ops::{Deref, DerefMut}; +use codec::{Decode, Encode, EncodeLike}; use schnorrkel::errors::MultiSignatureStage; +use sp_core::U512; +use sp_std::{ + convert::TryFrom, + ops::{Deref, DerefMut}, + prelude::*, +}; -pub use schnorrkel::{SignatureError, PublicKey, vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}}; +pub use schnorrkel::{ + vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + PublicKey, SignatureError, +}; /// The length of the Randomness. pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; @@ -34,11 +40,15 @@ pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); impl Deref for VRFOutput { type Target = schnorrkel::vrf::VRFOutput; - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for VRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } impl Encode for VRFOutput { @@ -47,7 +57,7 @@ impl Encode for VRFOutput { } } -impl EncodeLike for VRFOutput { } +impl EncodeLike for VRFOutput {} impl Decode for VRFOutput { fn decode(i: &mut R) -> Result { @@ -82,11 +92,15 @@ impl Ord for VRFProof { impl Deref for VRFProof { type Target = schnorrkel::vrf::VRFProof; - fn deref(&self) -> &Self::Target { &self.0 } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for VRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } impl Encode for VRFProof { @@ -95,7 +109,7 @@ impl Encode for VRFProof { } } -impl EncodeLike for VRFProof { } +impl EncodeLike for VRFProof {} impl Decode for VRFProof { fn decode(i: &mut R) -> Result { @@ -113,8 +127,8 @@ impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof { } fn convert_error(e: SignatureError) -> codec::Error { - use SignatureError::*; use MultiSignatureStage::*; + use SignatureError::*; match e { EquationFalse => "Signature error: `EquationFalse`".into(), PointDecompressionError => "Signature error: `PointDecompressionError`".into(), diff --git a/substrate/primitives/core/benches/bench.rs b/substrate/primitives/core/benches/bench.rs index 77680d53be6c4b7249e40baf46ef2918c52b2a6a..44bcd657ba3f02c0b6cff7cc266d50097c8651fd 100644 --- a/substrate/primitives/core/benches/bench.rs +++ b/substrate/primitives/core/benches/bench.rs @@ -15,22 +15,21 @@ #[macro_use] extern crate criterion; -use criterion::{Criterion, black_box, Bencher, BenchmarkId}; -use sp_core::crypto::Pair as _; -use sp_core::hashing::{twox_128, blake2_128}; +use criterion::{black_box, Bencher, BenchmarkId, Criterion}; +use sp_core::{ + crypto::Pair as _, + hashing::{blake2_128, twox_128}, +}; const MAX_KEY_SIZE: u32 = 32; fn get_key(key_size: u32) -> Vec { - use rand::SeedableRng; - use rand::Rng; + use rand::{Rng, SeedableRng}; let rnd: [u8; 32] = rand::rngs::StdRng::seed_from_u64(12).gen(); let mut rnd = rnd.iter().cycle(); - (0..key_size) - .map(|_| *rnd.next().unwrap()) - .collect() + 
(0..key_size).map(|_| *rnd.next().unwrap()).collect() } fn bench_blake2_128(b: &mut Bencher, key: &Vec) { @@ -81,27 +80,21 @@ fn bench_ed25519(c: &mut Criterion) { let mut group = c.benchmark_group("ed25519"); for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ed25519::Pair::generate().0; - group.bench_function( - BenchmarkId::new("signing", format!("{}", msg_size)), - |b| b.iter(|| key.sign(&msg)), - ); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| key.sign(&msg)) + }); } for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ed25519::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - group.bench_function( - BenchmarkId::new("verifying", format!("{}", msg_size)), - |b| b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)), - ); + group.bench_function(BenchmarkId::new("verifying", format!("{}", msg_size)), |b| { + b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)) + }); } group.finish(); @@ -111,27 +104,21 @@ fn bench_sr25519(c: &mut Criterion) { let mut group = c.benchmark_group("sr25519"); for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::sr25519::Pair::generate().0; - group.bench_function( - BenchmarkId::new("signing", format!("{}", msg_size)), - |b| b.iter(|| key.sign(&msg)), - ); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| key.sign(&msg)) + }); } for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::sr25519::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - group.bench_function( - BenchmarkId::new("verifying", format!("{}", msg_size)), - |b| b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)), - ); + group.bench_function(BenchmarkId::new("verifying", format!("{}", msg_size)), |b| { + b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)) + }); } group.finish(); @@ -141,27 +128,21 @@ fn bench_ecdsa(c: &mut Criterion) { let mut group = c.benchmark_group("ecdsa"); for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ecdsa::Pair::generate().0; - group.bench_function( - BenchmarkId::new("signing", format!("{}", msg_size)), - |b| b.iter(|| key.sign(&msg)), - ); + group.bench_function(BenchmarkId::new("signing", format!("{}", msg_size)), |b| { + b.iter(|| key.sign(&msg)) + }); } for msg_size in vec![32, 1024, 1024 * 1024] { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); + let msg = (0..msg_size).map(|_| rand::random::()).collect::>(); let key = sp_core::ecdsa::Pair::generate().0; let sig = key.sign(&msg); let public = key.public(); - group.bench_function( - BenchmarkId::new("verifying", format!("{}", msg_size)), - |b| b.iter(|| sp_core::ecdsa::Pair::verify(&sig, &msg, &public)), - ); + group.bench_function(BenchmarkId::new("verifying", 
format!("{}", msg_size)), |b| { + b.iter(|| sp_core::ecdsa::Pair::verify(&sig, &msg, &public)) + }); } group.finish(); diff --git a/substrate/primitives/core/src/changes_trie.rs b/substrate/primitives/core/src/changes_trie.rs index 7b886244a06460ad5bf310a24f31bc3e1c59e2ac..dd99a5f769ce9e853f5bcf1cb3de6c859d810759 100644 --- a/substrate/primitives/core/src/changes_trie.rs +++ b/substrate/primitives/core/src/changes_trie.rs @@ -17,13 +17,16 @@ //! Substrate changes trie configuration. -#[cfg(any(feature = "std", test))] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use num_traits::Zero; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; /// Substrate changes trie configuration. -#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) +)] #[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] pub struct ChangesTrieConfiguration { /// Interval (in blocks) at which level1-digests are created. Digests are not @@ -62,32 +65,31 @@ impl ChangesTrieConfiguration { } /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block( - &self, - zero: Number, - block: Number, - ) -> bool - where - Number: From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, + pub fn is_digest_build_required_at_block(&self, zero: Number, block: Number) -> bool + where + Number: From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, { - block > zero - && self.is_digest_build_enabled() - && ((block - zero) % self.digest_interval.into()).is_zero() + block > zero && + self.is_digest_build_enabled() && + ((block - zero) % self.digest_interval.into()).is_zero() } /// Returns max digest interval. One if digests are not created at all. pub fn max_digest_interval(&self) -> u32 { if !self.is_digest_build_enabled() { - return 1; + return 1 } // we'll get >1 loop iteration only when bad configuration parameters are selected let mut current_level = self.digest_levels; loop { if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { - return max_digest_interval; + return max_digest_interval } current_level -= 1; @@ -97,25 +99,28 @@ impl ChangesTrieConfiguration { /// Returns max level digest block number that has been created at block <= passed block number. /// /// Returns None if digests are not created at all. 
- pub fn prev_max_level_digest_block( - &self, - zero: Number, - block: Number, - ) -> Option - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul + Zero, + pub fn prev_max_level_digest_block(&self, zero: Number, block: Number) -> Option + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul + + Zero, { if block <= zero { - return None; + return None } - let (next_begin, next_end) = self.next_max_level_digest_range(zero.clone(), block.clone())?; + let (next_begin, next_end) = + self.next_max_level_digest_range(zero.clone(), block.clone())?; // if 'next' digest includes our block, then it is a also a previous digest if next_end == block { - return Some(block); + return Some(block) } // if previous digest ends at zero block, then there are no previous digest @@ -136,13 +141,18 @@ impl ChangesTrieConfiguration { zero: Number, mut block: Number, ) -> Option<(Number, Number)> - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul, + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul, { if !self.is_digest_build_enabled() { - return None; + return None } if block <= zero { @@ -152,7 +162,7 @@ impl ChangesTrieConfiguration { let max_digest_interval: Number = self.max_digest_interval().into(); let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); if max_digests_since_zero == 0.into() { - return Some((zero.clone() + 1.into(), zero + max_digest_interval)); + return Some((zero.clone() + 1.into(), zero + max_digest_interval)) } let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); Some(if block == last_max_digest_block { @@ -169,14 +179,22 @@ impl ChangesTrieConfiguration { /// digest interval (in blocks) /// step between blocks we're interested in when digest is built /// ) - pub fn digest_level_at_block(&self, zero: Number, block: Number) -> Option<(u32, u32, u32)> - where - Number: Clone + From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, + pub fn digest_level_at_block( + &self, + zero: Number, + block: Number, + ) -> Option<(u32, u32, u32)> + where + Number: Clone + + From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, { if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { - return None; + return None } let relative_block = block - zero; @@ -185,8 +203,9 @@ impl ChangesTrieConfiguration { let mut digest_step = 1u32; while current_level < self.digest_levels { let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) if (relative_block.clone() % new_digest_interval.into()).is_zero() - => new_digest_interval, + Some(new_digest_interval) + if (relative_block.clone() % new_digest_interval.into()).is_zero() => + new_digest_interval, _ => break, }; @@ -195,11 +214,7 @@ impl ChangesTrieConfiguration { current_level += 1; } - Some(( - current_level, - digest_interval, - digest_step, - )) + Some((current_level, digest_interval, digest_step)) } } @@ -208,10 +223,7 @@ mod tests { use super::ChangesTrieConfiguration; fn config(interval: u32, levels: u32) -> ChangesTrieConfiguration { - 
ChangesTrieConfiguration { - digest_interval: interval, - digest_levels: levels, - } + ChangesTrieConfiguration { digest_interval: interval, digest_levels: levels } } #[test] @@ -255,7 +267,10 @@ mod tests { assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4096u64), Some((4, 4096, 512))); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 4096u64), + Some((4, 4096, 512)) + ); assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4112u64), Some((1, 8, 1))); } diff --git a/substrate/primitives/core/src/crypto.rs b/substrate/primitives/core/src/crypto.rs index 7f8aecebbc6db37c5c845633f38a76005cebea84..fcf5c65c0a61f05e29004bc440d11bbabe79ea39 100644 --- a/substrate/primitives/core/src/crypto.rs +++ b/substrate/primitives/core/src/crypto.rs @@ -19,37 +19,35 @@ //! Cryptographic utilities. // end::description[] -use crate::{sr25519, ed25519}; -use sp_std::hash::Hash; -use sp_std::vec::Vec; -use sp_std::str; #[cfg(feature = "std")] -use sp_std::convert::TryInto; -use sp_std::convert::TryFrom; +use crate::hexdisplay::HexDisplay; +use crate::{ed25519, sr25519}; +#[cfg(feature = "std")] +use base58::{FromBase58, ToBase58}; +use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] use parking_lot::Mutex; #[cfg(feature = "std")] -use rand::{RngCore, rngs::OsRng}; -use codec::{Encode, Decode, MaxEncodedLen}; +use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; +/// Trait for accessing reference to `SecretString`. +pub use secrecy::ExposeSecret; +/// A store for sensitive data. #[cfg(feature = "std")] -use base58::{FromBase58, ToBase58}; +pub use secrecy::SecretString; +use sp_runtime_interface::pass_by::PassByInner; #[cfg(feature = "std")] -use crate::hexdisplay::HexDisplay; +use sp_std::convert::TryInto; #[doc(hidden)] pub use sp_std::ops::Deref; -use sp_runtime_interface::pass_by::PassByInner; +use sp_std::{convert::TryFrom, hash::Hash, str, vec::Vec}; /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; -/// Trait for accessing reference to `SecretString`. -pub use secrecy::ExposeSecret; -/// A store for sensitive data. -#[cfg(feature = "std")] -pub use secrecy::SecretString; /// The root phrase for our publicly known keys. -pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; +pub const DEV_PHRASE: &str = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; /// The address of the associated root phrase for our publicly known keys. pub const DEV_ADDRESS: &str = "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV"; @@ -118,22 +116,28 @@ pub enum DeriveJunction { #[cfg(feature = "full_crypto")] impl DeriveJunction { /// Consume self to return a soft derive junction with the same chain code. - pub fn soften(self) -> Self { DeriveJunction::Soft(self.unwrap_inner()) } + pub fn soften(self) -> Self { + DeriveJunction::Soft(self.unwrap_inner()) + } /// Consume self to return a hard derive junction with the same chain code. - pub fn harden(self) -> Self { DeriveJunction::Hard(self.unwrap_inner()) } + pub fn harden(self) -> Self { + DeriveJunction::Hard(self.unwrap_inner()) + } /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. /// /// If you need a hard junction, use `hard()`. 
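Back to the changes-trie tests a little above: the `digest_level_at_block` assertions follow from a single loop that keeps multiplying the interval by `digest_interval` while the relative block number stays divisible by the result. A standalone sketch that reproduces those numbers (the initial interval and step of 1 are assumptions based on the elided context):

// config(8, 4): level-n digests land every 8^n blocks, so relative block
// 4096 = 8^4 is a level-4 digest built from steps of 512 = 8^3.
fn digest_level(digest_interval: u32, digest_levels: u32, relative_block: u64) -> (u32, u32, u32) {
	let (mut level, mut interval, mut step) = (0u32, 1u32, 1u32);
	while level < digest_levels {
		match interval.checked_mul(digest_interval) {
			Some(next) if relative_block % u64::from(next) == 0 => {
				step = interval;
				interval = next;
				level += 1;
			},
			_ => break,
		}
	}
	(level, interval, step)
}

fn main() {
	// Matches the assertions in the tests above.
	assert_eq!(digest_level(8, 4, 4096), (4, 4096, 512));
	assert_eq!(digest_level(8, 4, 512), (3, 512, 64));
}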
pub fn soft(index: T) -> Self { let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); - index.using_encoded(|data| if data.len() > JUNCTION_ID_LEN { - let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); - let hash = hash_result.as_bytes(); - cc.copy_from_slice(hash); - } else { - cc[0..data.len()].copy_from_slice(data); + index.using_encoded(|data| { + if data.len() > JUNCTION_ID_LEN { + let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); + let hash = hash_result.as_bytes(); + cc.copy_from_slice(hash); + } else { + cc[0..data.len()].copy_from_slice(data); + } }); DeriveJunction::Soft(cc) } @@ -174,11 +178,8 @@ impl DeriveJunction { impl> From for DeriveJunction { fn from(j: T) -> DeriveJunction { let j = j.as_ref(); - let (code, hard) = if let Some(stripped) = j.strip_prefix('/') { - (stripped, true) - } else { - (j, false) - }; + let (code, hard) = + if let Some(stripped) = j.strip_prefix('/') { (stripped, true) } else { (j, false) }; let res = if let Ok(n) = str::parse::(code) { // number @@ -231,12 +232,11 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// Some if the string is a properly encoded SS58Check address. #[cfg(feature = "std")] fn from_ss58check(s: &str) -> Result { - Self::from_ss58check_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) + Self::from_ss58check_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) } /// Some if the string is a properly encoded SS58Check address. @@ -249,7 +249,9 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { let body_len = res.as_mut().len(); let data = s.from_base58().map_err(|_| PublicError::BadBase58)?; - if data.len() < 2 { return Err(PublicError::BadLength); } + if data.len() < 2 { + return Err(PublicError::BadLength) + } let (prefix_len, ident) = match data[0] { 0..=63 => (1, data[0] as u16), 64..=127 => { @@ -261,18 +263,22 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { let lower = (data[0] << 2) | (data[1] >> 6); let upper = data[1] & 0b00111111; (2, (lower as u16) | ((upper as u16) << 8)) - } + }, _ => return Err(PublicError::UnknownVersion), }; - if data.len() != prefix_len + body_len + CHECKSUM_LEN { return Err(PublicError::BadLength) } + if data.len() != prefix_len + body_len + CHECKSUM_LEN { + return Err(PublicError::BadLength) + } let format = ident.try_into().map_err(|_: ()| PublicError::UnknownVersion)?; - if !Self::format_is_allowed(format) { return Err(PublicError::FormatNotAllowed) } + if !Self::format_is_allowed(format) { + return Err(PublicError::FormatNotAllowed) + } let hash = ss58hash(&data[0..body_len + prefix_len]); let checksum = &hash.as_bytes()[0..CHECKSUM_LEN]; if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { // Invalid checksum. - return Err(PublicError::InvalidChecksum); + return Err(PublicError::InvalidChecksum) } res.as_mut().copy_from_slice(&data[prefix_len..body_len + prefix_len]); Ok((res, format)) @@ -282,12 +288,11 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// a derivation path following. 
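The two-byte SS58 prefix decoded above packs a 14-bit network ident into the pattern `01aaaaaa bbcccccc`; the matching encoder appears just below. A round-trip sketch of the bit manipulation (the `first`-byte computation is reconstructed from the decode arm, so treat it as an assumption):

fn encode_prefix(ident: u16) -> [u8; 2] {
	// Upper six bits of the ident's low byte go into byte 0,
	// behind the 0b01 marker for two-byte prefixes.
	let first = ((ident & 0b0000_0000_1111_1100) as u8) >> 2;
	// Low two bits of the low byte share byte 1 with the high byte.
	let second = ((ident >> 8) as u8) | ((ident & 0b0000_0000_0000_0011) as u8) << 6;
	[first | 0b0100_0000, second]
}

fn decode_prefix(data: [u8; 2]) -> u16 {
	// Mirrors the 64..=127 arm in the hunk above.
	let lower = (data[0] << 2) | (data[1] >> 6);
	let upper = data[1] & 0b0011_1111;
	(lower as u16) | ((upper as u16) << 8)
}

fn main() {
	for ident in [64u16, 255, 16383] {
		assert_eq!(decode_prefix(encode_prefix(ident)), ident);
	}
}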
#[cfg(feature = "std")] fn from_string(s: &str) -> Result { - Self::from_string_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) + Self::from_string_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) } /// Return the ss58-check string for this key. @@ -304,7 +309,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { // lower bits of the upper byte in the low pos let second = ((ident >> 8) as u8) | ((ident & 0b0000_0000_0000_0011) as u8) << 6; vec![first | 0b01000000, second] - } + }, _ => unreachable!("masked out the upper two bits; qed"), }; v.extend(self.as_ref()); @@ -315,7 +320,9 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// Return the ss58-check string for this key. #[cfg(feature = "std")] - fn to_ss58check(&self) -> String { self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) } + fn to_ss58check(&self) -> String { + self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) + } /// Some if the string is a properly encoded SS58Check address, optionally with /// a derivation path following. @@ -331,7 +338,7 @@ pub trait Derive: Sized { /// /// Will be `None` for public keys if there are any hard junctions in there. #[cfg(feature = "std")] - fn derive>(&self, _path: Iter) -> Option { + fn derive>(&self, _path: Iter) -> Option { None } } @@ -629,9 +636,7 @@ lazy_static::lazy_static! { impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; - let s = cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS); + let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); let addr = if let Some(stripped) = s.strip_prefix("0x") { let d = hex::decode(stripped).map_err(|_| PublicError::InvalidFormat)?; let mut r = Self::default(); @@ -647,28 +652,23 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { if cap["path"].is_empty() { Ok(addr) } else { - let path = JUNCTION_REGEX.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) + let path = + JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); + addr.derive(path).ok_or(PublicError::InvalidPath) } } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS) + cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), )?; if cap["path"].is_empty() { Ok((addr, v)) } else { - let path = JUNCTION_REGEX.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) - .map(|a| (a, v)) + let path = + JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); + addr.derive(path).ok_or(PublicError::InvalidPath).map(|a| (a, v)) } } } @@ -694,10 +694,14 @@ pub trait Public: fn from_slice(data: &[u8]) -> Self; /// Return a `Vec` filled with raw data. - fn to_raw_vec(&self) -> Vec { self.as_slice().to_vec() } + fn to_raw_vec(&self) -> Vec { + self.as_slice().to_vec() + } /// Return a slice filled with raw data. 
- fn as_slice(&self) -> &[u8] { self.as_ref() } + fn as_slice(&self) -> &[u8] { + self.as_ref() + } /// Return `CryptoTypePublicPair` from public key. fn to_public_crypto_pair(&self) -> CryptoTypePublicPair; } @@ -809,14 +813,20 @@ impl sp_std::fmt::Debug for AccountId32 { #[cfg(feature = "std")] impl serde::Serialize for AccountId32 { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> serde::Deserialize<'de> for AccountId32 { - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { Ss58Codec::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| serde::de::Error::custom(format!("{:?}", e))) } @@ -851,11 +861,13 @@ mod dummy { pub struct Dummy; impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { &b""[..] } + fn as_ref(&self) -> &[u8] { + &b""[..] + } } impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut[u8] { + fn as_mut(&mut self) -> &mut [u8] { unsafe { #[allow(mutable_transmutes)] sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) @@ -878,14 +890,18 @@ mod dummy { impl Derive for Dummy {} impl Public for Dummy { - fn from_slice(_: &[u8]) -> Self { Self } + fn from_slice(_: &[u8]) -> Self { + Self + } #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { vec![] } - fn as_slice(&self) -> &[u8] { b"" } + fn to_raw_vec(&self) -> Vec { + vec![] + } + fn as_slice(&self) -> &[u8] { + b"" + } fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair( - CryptoTypeId(*b"dumm"), Public::to_raw_vec(self) - ) + CryptoTypePublicPair(CryptoTypeId(*b"dumm"), Public::to_raw_vec(self)) } } @@ -895,23 +911,41 @@ mod dummy { type Signature = Dummy; type DeriveError = (); #[cfg(feature = "std")] - fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { Default::default() } + fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { + Default::default() + } #[cfg(feature = "std")] - fn from_phrase(_: &str, _: Option<&str>) - -> Result<(Self, Self::Seed), SecretStringError> - { + fn from_phrase(_: &str, _: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError> { Ok(Default::default()) } - fn derive< - Iter: Iterator, - >(&self, _: Iter, _: Option) -> Result<(Self, Option), Self::DeriveError> { Ok((Self, None)) } - fn from_seed(_: &Self::Seed) -> Self { Self } - fn from_seed_slice(_: &[u8]) -> Result { Ok(Self) } - fn sign(&self, _: &[u8]) -> Self::Signature { Self } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { true } - fn public(&self) -> Self::Public { Self } - fn to_raw_vec(&self) -> Vec { vec![] } + fn derive>( + &self, + _: Iter, + _: Option, + ) -> Result<(Self, Option), Self::DeriveError> { + Ok((Self, None)) + } + fn from_seed(_: &Self::Seed) -> Self { + Self + } + fn from_seed_slice(_: &[u8]) -> Result { + Ok(Self) + } + fn sign(&self, _: &[u8]) -> Self::Signature { + Self + } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true + } + fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { + true + } + fn public(&self) -> Self::Public { + Self + } + fn to_raw_vec(&self) -> Vec { + vec![] + } } } @@ -956,10 +990,14 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { 
/// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError>; + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), SecretStringError>; /// Derive a child key from a series of given junctions. - fn derive>(&self, + fn derive>( + &self, path: Iter, seed: Option, ) -> Result<(Self, Option), Self::DeriveError>; @@ -1018,19 +1056,20 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { /// /// `None` is returned if no matches are found. #[cfg(feature = "std")] - fn from_string_with_seed(s: &str, password_override: Option<&str>) - -> Result<(Self, Option), SecretStringError> - { + fn from_string_with_seed( + s: &str, + password_override: Option<&str>, + ) -> Result<(Self, Option), SecretStringError> { let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - let path = JUNCTION_REGEX.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); + let path = JUNCTION_REGEX.captures_iter(&cap["path"]).map(|f| DeriveJunction::from(&f[1])); let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); let password = password_override.or_else(|| cap.name("password").map(|m| m.as_str())); let (root, seed) = if let Some(stripped) = phrase.strip_prefix("0x") { - hex::decode(stripped).ok() + hex::decode(stripped) + .ok() .and_then(|seed_vec| { let mut seed = Self::Seed::default(); if seed.as_ref().len() == seed_vec.len() { @@ -1042,8 +1081,7 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { }) .ok_or(SecretStringError::InvalidSeed)? } else { - Self::from_phrase(phrase, password) - .map_err(|_| SecretStringError::InvalidPhrase)? + Self::from_phrase(phrase, password).map_err(|_| SecretStringError::InvalidPhrase)? }; root.derive(path, Some(seed)).map_err(|_| SecretStringError::InvalidPath) } @@ -1074,19 +1112,25 @@ pub trait Wraps: Sized { type Inner: IsWrappedBy; } -impl IsWrappedBy for T where +impl IsWrappedBy for T +where Outer: AsRef + AsMut + From, T: From, { /// Get a reference to the inner from the outer. - fn from_ref(outer: &Outer) -> &Self { outer.as_ref() } + fn from_ref(outer: &Outer) -> &Self { + outer.as_ref() + } /// Get a mutable reference to the inner from the outer. - fn from_mut(outer: &mut Outer) -> &mut Self { outer.as_mut() } + fn from_mut(outer: &mut Outer) -> &mut Self { + outer.as_mut() + } } -impl UncheckedFrom for Outer where - Outer: Wraps, +impl UncheckedFrom for Outer +where + Outer: Wraps, Inner: IsWrappedBy + UncheckedFrom, { fn unchecked_from(t: T) -> Self { @@ -1110,8 +1154,18 @@ pub trait CryptoType { /// Values whose first character is `_` are reserved for private use and won't conflict with any /// public modules. 
#[derive( - Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode, PassByInner, - crate::RuntimeDebug + Copy, + Clone, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Encode, + Decode, + PassByInner, + crate::RuntimeDebug, )] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct KeyTypeId(pub [u8; 4]); @@ -1134,7 +1188,7 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { fn try_from(x: &'a str) -> Result { let b = x.as_bytes(); if b.len() != 4 { - return Err(()); + return Err(()) } let mut res = KeyTypeId::default(); res.0.copy_from_slice(&b[0..4]); @@ -1159,7 +1213,7 @@ impl sp_std::fmt::Display for CryptoTypePublicPair { Ok(id) => id.to_string(), Err(_) => { format!("{:#?}", self.0) - } + }, }; write!(f, "{}-{}", id, HexDisplay::from(&self.1)) } @@ -1195,16 +1249,16 @@ pub mod key_types { #[cfg(test)] mod tests { + use super::*; use crate::DeriveJunction; use hex_literal::hex; - use super::*; #[derive(Clone, Eq, PartialEq, Debug)] enum TestPair { Generated, GeneratedWithPhrase, - GeneratedFromPhrase{phrase: String, password: Option}, - Standard{phrase: String, password: Option, path: Vec}, + GeneratedFromPhrase { phrase: String, password: Option }, + Standard { phrase: String, password: Option, path: Vec }, Seed(Vec), } impl Default for TestPair { @@ -1250,9 +1304,7 @@ mod tests { vec![] } fn to_public_crypto_pair(&self) -> CryptoTypePublicPair { - CryptoTypePublicPair( - CryptoTypeId(*b"dumm"), self.to_raw_vec(), - ) + CryptoTypePublicPair(CryptoTypeId(*b"dumm"), self.to_raw_vec()) } } impl Pair for TestPair { @@ -1261,41 +1313,68 @@ mod tests { type Signature = [u8; 0]; type DeriveError = (); - fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) } + fn generate() -> (Self, ::Seed) { + (TestPair::Generated, [0u8; 8]) + } fn generate_with_phrase(_password: Option<&str>) -> (Self, String, ::Seed) { (TestPair::GeneratedWithPhrase, "".into(), [0u8; 8]) } - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, ::Seed), SecretStringError> - { - Ok((TestPair::GeneratedFromPhrase { - phrase: phrase.to_owned(), - password: password.map(Into::into) - }, [0u8; 8])) + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, ::Seed), SecretStringError> { + Ok(( + TestPair::GeneratedFromPhrase { + phrase: phrase.to_owned(), + password: password.map(Into::into), + }, + [0u8; 8], + )) + } + fn derive>( + &self, + path_iter: Iter, + _: Option<[u8; 8]>, + ) -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> { + Ok(( + match self.clone() { + TestPair::Standard { phrase, password, path } => TestPair::Standard { + phrase, + password, + path: path.into_iter().chain(path_iter).collect(), + }, + TestPair::GeneratedFromPhrase { phrase, password } => + TestPair::Standard { phrase, password, path: path_iter.collect() }, + x => + if path_iter.count() == 0 { + x + } else { + return Err(()) + }, + }, + None, + )) + } + fn from_seed(_seed: &::Seed) -> Self { + TestPair::Seed(_seed.as_ref().to_owned()) } - fn derive>(&self, path_iter: Iter, _: Option<[u8; 8]>) - -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> - { - Ok((match self.clone() { - TestPair::Standard {phrase, password, path} => - TestPair::Standard { phrase, password, path: path.into_iter().chain(path_iter).collect() }, - TestPair::GeneratedFromPhrase {phrase, password} => - TestPair::Standard { phrase, password, path: path_iter.collect() }, - x => if path_iter.count() == 0 { x } else { return Err(()) }, - }, None)) + fn sign(&self, 
_message: &[u8]) -> Self::Signature { + [] + } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true } - fn from_seed(_seed: &::Seed) -> Self { TestPair::Seed(_seed.as_ref().to_owned()) } - fn sign(&self, _message: &[u8]) -> Self::Signature { [] } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } fn verify_weak, M: AsRef<[u8]>>( _sig: &[u8], _message: M, - _pubkey: P - ) -> bool { true } - fn public(&self) -> Self::Public { TestPublic } - fn from_seed_slice(seed: &[u8]) - -> Result - { + _pubkey: P, + ) -> bool { + true + } + fn public(&self) -> Self::Public { + TestPublic + } + fn from_seed_slice(seed: &[u8]) -> Result { Ok(TestPair::Seed(seed.to_owned())) } fn to_raw_vec(&self) -> Vec { @@ -1327,43 +1406,83 @@ mod tests { fn interpret_std_secret_string_should_work() { assert_eq!( TestPair::from_string("hello world", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![] + }) ); assert_eq!( TestPair::from_string("hello world/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft(1)] + }) ); assert_eq!( TestPair::from_string("hello world/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1)] + }) ); assert_eq!( TestPair::from_string("hello world//DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//1/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world//DOT/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)] + }) ); assert_eq!( TestPair::from_string("hello world///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![] + }) ); assert_eq!( TestPair::from_string("hello world//1/DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) + 
Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) ); assert_eq!( TestPair::from_string("hello world/1//DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")]}) + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")] + }) ); } @@ -1371,25 +1490,40 @@ mod tests { fn accountid_32_from_str_works() { use std::str::FromStr; assert!(AccountId32::from_str("5G9VdMwXvzza9pS8qE8ZHJk3CheHW9uucBn9ngW4C1gmmzpv").is_ok()); - assert!(AccountId32::from_str("5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").is_ok()); - assert!(AccountId32::from_str("0x5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").is_ok()); + assert!(AccountId32::from_str( + "5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .is_ok()); + assert!(AccountId32::from_str( + "0x5c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .is_ok()); assert_eq!( AccountId32::from_str("99G9VdMwXvzza9pS8qE8ZHJk3CheHW9uucBn9ngW4C1gmmzpv").unwrap_err(), "invalid ss58 address.", ); assert_eq!( - AccountId32::from_str("gc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(), + AccountId32::from_str( + "gc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .unwrap_err(), "invalid hex address.", ); assert_eq!( - AccountId32::from_str("0xgc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(), + AccountId32::from_str( + "0xgc55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d" + ) + .unwrap_err(), "invalid hex address.", ); // valid hex but invalid length will be treated as ss58. 
 		assert_eq!(
-			AccountId32::from_str("55c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d").unwrap_err(),
+			AccountId32::from_str(
+				"55c55177d67b064bb5d189a3e1ddad9bc6646e02e64d6e308f5acbb1533ac430d"
+			)
+			.unwrap_err(),
 			"invalid ss58 address.",
 		);
 	}
diff --git a/substrate/primitives/core/src/ecdsa.rs b/substrate/primitives/core/src/ecdsa.rs
index ffdb5f5c4c9903fcc53de516dc64ff4e173e223c..b4c4bda17acbaed421d04fdd593a0f8650a82e6f 100644
--- a/substrate/primitives/core/src/ecdsa.rs
+++ b/substrate/primitives/core/src/ecdsa.rs
@@ -22,25 +22,30 @@
 #[cfg(feature = "full_crypto")]
 use sp_std::vec::Vec;
 
+use codec::{Decode, Encode, MaxEncodedLen};
 use sp_std::cmp::Ordering;
-use codec::{Encode, Decode, MaxEncodedLen};
-#[cfg(feature = "full_crypto")]
-use core::convert::{TryFrom, TryInto};
-#[cfg(feature = "std")]
-use substrate_bip39::seed_from_entropy;
-#[cfg(feature = "std")]
-use bip39::{Mnemonic, Language, MnemonicType};
-#[cfg(feature = "full_crypto")]
-use crate::{hashing::blake2_256, crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}};
 #[cfg(feature = "std")]
 use crate::crypto::Ss58Codec;
+use crate::crypto::{
+	CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom,
+};
+#[cfg(feature = "full_crypto")]
+use crate::{
+	crypto::{DeriveJunction, Pair as TraitPair, SecretStringError},
+	hashing::blake2_256,
+};
 #[cfg(feature = "std")]
-use serde::{de, Serializer, Serialize, Deserializer, Deserialize};
-use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId};
-use sp_runtime_interface::pass_by::PassByInner;
+use bip39::{Language, Mnemonic, MnemonicType};
+#[cfg(feature = "full_crypto")]
+use core::convert::{TryFrom, TryInto};
 #[cfg(feature = "full_crypto")]
 use secp256k1::{PublicKey, SecretKey};
+#[cfg(feature = "std")]
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+use sp_runtime_interface::pass_by::PassByInner;
+#[cfg(feature = "std")]
+use substrate_bip39::seed_from_entropy;
 
 /// An identifier used to match public keys against ecdsa keys
 pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds");
@@ -165,7 +170,6 @@ impl sp_std::convert::TryFrom<&[u8]> for Public {
 		if data.len() == 33 {
 			Ok(Self::from_slice(data))
 		} else {
-
 			Err(())
 		}
 	}
@@ -206,14 +210,20 @@ impl sp_std::fmt::Debug for Public {
 
 #[cfg(feature = "std")]
 impl Serialize for Public {
-	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: Serializer,
+	{
 		serializer.serialize_str(&self.to_ss58check())
 	}
 }
 
 #[cfg(feature = "std")]
 impl<'de> Deserialize<'de> for Public {
-	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: Deserializer<'de>,
+	{
 		Public::from_ss58check(&String::deserialize(deserializer)?)
.map_err(|e| de::Error::custom(format!("{:?}", e))) } @@ -246,14 +256,20 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) @@ -359,7 +375,7 @@ impl Signature { #[cfg(feature = "full_crypto")] pub fn recover_prehashed(&self, message: &[u8; 32]) -> Option { let message = secp256k1::Message::parse(message); - + let sig: (_, _) = self.try_into().ok()?; secp256k1::recover(&message, &sig.0, &sig.1) @@ -381,7 +397,9 @@ impl From<(secp256k1::Signature, secp256k1::RecoveryId)> for Signature { #[cfg(feature = "full_crypto")] impl<'a> TryFrom<&'a Signature> for (secp256k1::Signature, secp256k1::RecoveryId) { type Error = (); - fn try_from(x: &'a Signature) -> Result<(secp256k1::Signature, secp256k1::RecoveryId), Self::Error> { + fn try_from( + x: &'a Signature, + ) -> Result<(secp256k1::Signature, secp256k1::RecoveryId), Self::Error> { Ok(( secp256k1::Signature::parse_slice(&x.0[0..64]).expect("hardcoded to 64 bytes; qed"), secp256k1::RecoveryId::parse(x.0[64]).map_err(|_| ())?, @@ -430,21 +448,22 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } /// Generate key pair from given recovery phrase and password. #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { let big_seed = seed_from_entropy( Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; + ) + .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Seed::default(); seed.copy_from_slice(&big_seed[0..32]); Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) @@ -462,16 +481,17 @@ impl TraitPair for Pair { /// /// You should never need to use this; generate(), generate_with_phrase fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = SecretKey::parse_slice(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; + let secret = + SecretKey::parse_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; let public = PublicKey::from_secret_key(&secret); - Ok(Pair{ public, secret }) + Ok(Pair { public, secret }) } /// Derive a child key from a series of given junctions. - fn derive>(&self, + fn derive>( + &self, path: Iter, - _seed: Option + _seed: Option, ) -> Result<(Pair, Option), DeriveError> { let mut acc = self.secret.serialize(); for j in path { @@ -497,7 +517,10 @@ impl TraitPair for Pair { /// Verify a signature on a message. Returns true if the signature is good. 
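The ecdsa hunks around here verify by public-key recovery rather than by classic signature checking: the 65-byte signature carries a recovery id, so the verifier recomputes the signer's key and compares it. A compact sketch using the same `libsecp256k1`-style calls that appear in the diff (`sign_and_check` itself is hypothetical, and `msg32` is assumed to be pre-hashed):

fn sign_and_check(secret: &secp256k1::SecretKey, msg32: &[u8; 32]) -> bool {
	let message = secp256k1::Message::parse(msg32);
	// `sign` returns the signature together with its recovery id.
	let (sig, recovery_id) = secp256k1::sign(&message, secret);
	// Recover the public key from the signature and compare it with the
	// key derived from the secret, as `verify` does below.
	let expected = secp256k1::PublicKey::from_secret_key(secret);
	match secp256k1::recover(&message, &sig, &recovery_id) {
		Ok(actual) => expected.serialize_compressed()[..] == actual.serialize_compressed()[..],
		Err(_) => false,
	}
}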
fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - let sig: (_, _) = match sig.try_into() { Ok(x) => x, _ => return false }; + let sig: (_, _) = match sig.try_into() { + Ok(x) => x, + _ => return false, + }; match secp256k1::recover(&message, &sig.0, &sig.1) { Ok(actual) => pubkey.0[..] == actual.serialize_compressed()[..], _ => false, @@ -510,9 +533,17 @@ impl TraitPair for Pair { /// size. Use it only if you're coming from byte buffers and need the speed. fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - if sig.len() != 65 { return false } - let ri = match secp256k1::RecoveryId::parse(sig[64]) { Ok(x) => x, _ => return false }; - let sig = match secp256k1::Signature::parse_slice(&sig[0..64]) { Ok(x) => x, _ => return false }; + if sig.len() != 65 { + return false + } + let ri = match secp256k1::RecoveryId::parse(sig[64]) { + Ok(x) => x, + _ => return false, + }; + let sig = match secp256k1::Signature::parse_slice(&sig[0..64]) { + Ok(x) => x, + _ => return false, + }; match secp256k1::recover(&message, &sig, &ri) { Ok(actual) => pubkey.as_ref() == &actual.serialize()[1..], _ => false, @@ -554,30 +585,30 @@ impl Pair { /// and thus matches the given `public` key. pub fn verify_prehashed(sig: &Signature, message: &[u8; 32], public: &Public) -> bool { let message = secp256k1::Message::parse(message); - + let sig: (_, _) = match sig.try_into() { Ok(x) => x, _ => return false, }; - + match secp256k1::recover(&message, &sig.0, &sig.1) { Ok(actual) => public.0[..] == actual.serialize_compressed()[..], _ => false, } - } + } } impl CryptoType for Public { - #[cfg(feature="full_crypto")] + #[cfg(feature = "full_crypto")] type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature="full_crypto")] + #[cfg(feature = "full_crypto")] type Pair = Pair; } -#[cfg(feature="full_crypto")] +#[cfg(feature = "full_crypto")] impl CryptoType for Pair { type Pair = Pair; } @@ -585,16 +616,20 @@ impl CryptoType for Pair { #[cfg(test)] mod test { use super::*; + use crate::{ + crypto::{set_default_ss58_version, PublicError, DEV_PHRASE}, + keccak_256, + }; use hex_literal::hex; - use crate::{crypto::{DEV_PHRASE, set_default_ss58_version}, keccak_256}; use serde_json; - use crate::crypto::PublicError; #[test] fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); } @@ -613,9 +648,9 @@ mod test { #[test] fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); let public = pair.public(); assert_eq!( public, @@ -634,8 +669,9 @@ mod test { fn test_vector_by_string_should_work() { let pair = Pair::from_string( "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); + None, + ) + .unwrap(); let public = pair.public(); assert_eq!( public, @@ -803,7 +839,8 @@ mod test { // `msg` shouldn't be mangled let msg = [0u8; 32]; let sig1 = pair.sign_prehashed(&msg); - let sig2: Signature = secp256k1::sign(&secp256k1::Message::parse(&msg), 
&pair.secret).into(); + let sig2: Signature = + secp256k1::sign(&secp256k1::Message::parse(&msg), &pair.secret).into(); assert_eq!(sig1, sig2); @@ -815,15 +852,16 @@ mod test { // using pre-hashed `msg` works let msg = keccak_256(b"this should be hashed"); let sig1 = pair.sign_prehashed(&msg); - let sig2: Signature = secp256k1::sign(&secp256k1::Message::parse(&msg), &pair.secret).into(); + let sig2: Signature = + secp256k1::sign(&secp256k1::Message::parse(&msg), &pair.secret).into(); - assert_eq!(sig1, sig2); + assert_eq!(sig1, sig2); } #[test] fn verify_prehashed_works() { let (pair, _, _) = Pair::generate_with_phrase(Some("password")); - + // `msg` and `sig` match let msg = keccak_256(b"this should be hashed"); let sig = pair.sign_prehashed(&msg); diff --git a/substrate/primitives/core/src/ed25519.rs b/substrate/primitives/core/src/ed25519.rs index 13ee4d8cdfbcd0536ca961b14074e60818a3b5b4..be70da31e641de0d9dd6096e6bfbf20f2026d96a 100644 --- a/substrate/primitives/core/src/ed25519.rs +++ b/substrate/primitives/core/src/ed25519.rs @@ -22,26 +22,28 @@ #[cfg(feature = "full_crypto")] use sp_std::vec::Vec; -use crate::{hash::H256, hash::H512}; -use codec::{Encode, Decode, MaxEncodedLen}; +use crate::hash::{H256, H512}; +use codec::{Decode, Encode, MaxEncodedLen}; +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, +}; +#[cfg(feature = "full_crypto")] +use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] use core::convert::TryFrom; #[cfg(feature = "full_crypto")] use ed25519_dalek::{Signer as _, Verifier as _}; #[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; -#[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; use sp_std::ops::Deref; +#[cfg(feature = "std")] +use substrate_bip39::seed_from_entropy; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); @@ -55,8 +57,7 @@ type Seed = [u8; 32]; /// A public key. 
#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( - PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner, - MaxEncodedLen, + PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner, MaxEncodedLen, )] pub struct Public(pub [u8; 32]); @@ -70,7 +71,7 @@ impl Clone for Pair { Pair(ed25519_dalek::Keypair { public: self.0.public, secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed") + .expect("key is always the correct size; qed"), }) } } @@ -177,14 +178,20 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { Public::from_ss58check(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e))) } @@ -210,14 +217,20 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) @@ -438,21 +451,22 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } /// Generate key pair from given recovery phrase and password. #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { let big_seed = seed_from_entropy( Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; + ) + .map_err(|_| SecretStringError::InvalidSeed)?; let mut seed = Seed::default(); seed.copy_from_slice(&big_seed[0..32]); Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) @@ -477,7 +491,8 @@ impl TraitPair for Pair { } /// Derive a child key from a series of given junctions. 
- fn derive>(&self, + fn derive>( + &self, path: Iter, _seed: Option, ) -> Result<(Pair, Option), DeriveError> { @@ -522,7 +537,7 @@ impl TraitPair for Pair { let sig = match ed25519_dalek::Signature::try_from(sig) { Ok(s) => s, - Err(_) => return false + Err(_) => return false, }; public_key.verify(message.as_ref(), &sig).is_ok() @@ -572,15 +587,17 @@ impl CryptoType for Pair { #[cfg(test)] mod test { use super::*; - use hex_literal::hex; use crate::crypto::DEV_PHRASE; + use hex_literal::hex; use serde_json; #[test] fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); } @@ -599,13 +616,16 @@ mod test { #[test] fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); - let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); let message = b""; let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); @@ -617,12 +637,16 @@ mod test { fn test_vector_by_string_should_work() { let pair = Pair::from_string( "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); + None, + ) + .unwrap(); let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") - )); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); let message = b""; let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); let signature = Signature::from_raw(signature); @@ -644,9 +668,12 @@ mod test { fn seeded_pair_should_work() { let pair = Pair::from_seed(b"12345678901234567890123456789012"); let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee") - )); + assert_eq!( + public, + Public::from_raw(hex!( + "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" + )) + ); let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); let signature = pair.sign(&message[..]); println!("Correct signature: {:?}", signature); diff --git a/substrate/primitives/core/src/hash.rs b/substrate/primitives/core/src/hash.rs index 6ef1827a1ba0c6733ebadf001b15306c95b4c243..55a9664c9dad467ca5a5762958fca41b3434ea3a 100644 --- a/substrate/primitives/core/src/hash.rs +++ b/substrate/primitives/core/src/hash.rs @@ -55,13 +55,34 @@ mod tests { #[test] fn test_h256() { let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000000000000000000000000000"), - (H256::from_low_u64_be(2), 
"0x0000000000000000000000000000000000000000000000000000000000000002"), - (H256::from_low_u64_be(15), "0x000000000000000000000000000000000000000000000000000000000000000f"), - (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), - (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), - (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::MAX), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), + ( + Default::default(), + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ( + H256::from_low_u64_be(2), + "0x0000000000000000000000000000000000000000000000000000000000000002", + ), + ( + H256::from_low_u64_be(15), + "0x000000000000000000000000000000000000000000000000000000000000000f", + ), + ( + H256::from_low_u64_be(16), + "0x0000000000000000000000000000000000000000000000000000000000000010", + ), + ( + H256::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000000000000000000000000000003e8", + ), + ( + H256::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000000000000000000000000000186a0", + ), + ( + H256::from_low_u64_be(u64::MAX), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), ]; for (number, expected) in tests { @@ -72,9 +93,21 @@ mod tests { #[test] fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000g\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x00000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); assert!(ser::from_str::("\"\"").unwrap_err().is_data()); assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); diff --git a/substrate/primitives/core/src/hasher.rs b/substrate/primitives/core/src/hasher.rs index 13a168c70f93cf8d7871255ab88761e20846ecc1..01680de0837629caff039714de527a3480131736 100644 --- a/substrate/primitives/core/src/hasher.rs +++ b/substrate/primitives/core/src/hasher.rs @@ -18,9 +18,9 @@ //! 
Substrate Blake2b Hasher implementation pub mod blake2 { - use hash_db::Hasher; - use hash256_std_hasher::Hash256StdHasher; use crate::hash::H256; + use hash256_std_hasher::Hash256StdHasher; + use hash_db::Hasher; /// Concrete implementation of Hasher using Blake2b 256-bit hashes #[derive(Debug)] @@ -38,9 +38,9 @@ pub mod blake2 { } pub mod keccak { - use hash_db::Hasher; - use hash256_std_hasher::Hash256StdHasher; use crate::hash::H256; + use hash256_std_hasher::Hash256StdHasher; + use hash_db::Hasher; /// Concrete implementation of Hasher using Keccak 256-bit hashes #[derive(Debug)] diff --git a/substrate/primitives/core/src/hexdisplay.rs b/substrate/primitives/core/src/hexdisplay.rs index e590eec0e5aec54b52ccd71343c36647fa7f7f7b..4d91db1567920b84e5d60d63c7e3a8a31b2dfe62 100644 --- a/substrate/primitives/core/src/hexdisplay.rs +++ b/substrate/primitives/core/src/hexdisplay.rs @@ -22,7 +22,9 @@ pub struct HexDisplay<'a>(&'a [u8]); impl<'a> HexDisplay<'a> { /// Create new instance that will display `d` as a hex string when displayed. - pub fn from(d: &'a R) -> Self { HexDisplay(d.as_bytes_ref()) } + pub fn from(d: &'a R) -> Self { + HexDisplay(d.as_bytes_ref()) + } } impl<'a> sp_std::fmt::Display for HexDisplay<'a> { @@ -60,15 +62,21 @@ pub trait AsBytesRef { } impl AsBytesRef for &[u8] { - fn as_bytes_ref(&self) -> &[u8] { self } + fn as_bytes_ref(&self) -> &[u8] { + self + } } impl AsBytesRef for [u8] { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for sp_std::vec::Vec { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for sp_storage::StorageKey { @@ -85,9 +93,11 @@ macro_rules! impl_non_endians { )* } } -impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], - [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], - [u8; 48], [u8; 56], [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); +impl_non_endians!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], + [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], + [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128] +); /// Format into ASCII + # + hex, suitable for storage key preimages. #[cfg(feature = "std")] @@ -103,7 +113,7 @@ pub fn ascii_format(asciish: &[u8]) -> String { latch = true; } r.push_str(&format!("{:02x}", *c)); - } + }, } } r diff --git a/substrate/primitives/core/src/lib.rs b/substrate/primitives/core/src/lib.rs index 1ca97e7c3ffc5b925f4ec1bd5029e399bceee1cd..8bc189b5c3714af36a746eabdebbf13cd0a9a10b 100644 --- a/substrate/primitives/core/src/lib.rs +++ b/substrate/primitives/core/src/lib.rs @@ -18,7 +18,6 @@ //! Shareable Substrate types. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] /// Initialize a key-value collection from array. @@ -32,17 +31,16 @@ macro_rules! 
map { ); } -use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; -use sp_std::prelude::*; -use sp_std::ops::Deref; +#[doc(hidden)] +pub use codec::{Decode, Encode}; #[cfg(feature = "std")] -use std::borrow::Cow; +pub use serde; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use sp_runtime_interface::pass_by::{PassByEnum, PassByInner}; +use sp_std::{ops::Deref, prelude::*}; #[cfg(feature = "std")] -pub use serde; -#[doc(hidden)] -pub use codec::{Encode, Decode}; +use std::borrow::Cow; pub use sp_debug_derive::RuntimeDebug; @@ -53,37 +51,39 @@ pub use impl_serde::serialize as bytes; pub mod hashing; #[cfg(feature = "full_crypto")] -pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; -pub mod hexdisplay; +pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; pub mod crypto; +pub mod hexdisplay; pub mod u32_trait; -pub mod ed25519; -pub mod sr25519; +mod changes_trie; pub mod ecdsa; +pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; pub mod sandbox; -pub mod uint; -mod changes_trie; +pub mod sr25519; +pub mod testing; #[cfg(feature = "std")] pub mod traits; -pub mod testing; +pub mod uint; -pub use self::hash::{H160, H256, H512, convert_hash}; -pub use self::uint::{U256, U512}; +pub use self::{ + hash::{convert_hash, H160, H256, H512}, + uint::{U256, U512}, +}; pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; #[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; #[cfg(feature = "std")] pub use self::hasher::keccak::KeccakHasher; +pub use hash_db::Hasher; pub use sp_storage as storage; @@ -117,14 +117,14 @@ impl ExecutionContext { use ExecutionContext::*; match self { - Importing | Syncing | BlockConstruction => - offchain::Capabilities::none(), + Importing | Syncing | BlockConstruction => offchain::Capabilities::none(), // Enable keystore, transaction pool and Offchain DB reads by default for offchain calls. OffchainCall(None) => [ offchain::Capability::Keystore, offchain::Capability::OffchainDbRead, offchain::Capability::TransactionPool, - ][..].into(), + ][..] + .into(), OffchainCall(Some((_, capabilities))) => *capabilities, } } @@ -133,19 +133,25 @@ impl ExecutionContext { /// Hex-serialized shim for `Vec`. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))] -pub struct Bytes(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Bytes(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl From> for Bytes { - fn from(s: Vec) -> Self { Bytes(s) } + fn from(s: Vec) -> Self { + Bytes(s) + } } impl From for Bytes { - fn from(s: OpaqueMetadata) -> Self { Bytes(s.0) } + fn from(s: OpaqueMetadata) -> Self { + Bytes(s.0) + } } impl Deref for Bytes { type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } + fn deref(&self) -> &[u8] { + &self.0[..] + } } impl codec::WrapperTypeEncode for Bytes {} @@ -183,7 +189,9 @@ impl sp_std::ops::Deref for OpaqueMetadata { } /// Simple blob to hold a `PeerId` without committing to its format. 
-#[derive(Default, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, PassByInner)] +#[derive( + Default, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, PassByInner, +)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct OpaquePeerId(pub Vec); @@ -200,7 +208,7 @@ pub enum NativeOrEncoded { /// The native representation. Native(R), /// The encoded representation. - Encoded(Vec) + Encoded(Vec), } #[cfg(feature = "std")] diff --git a/substrate/primitives/core/src/offchain/mod.rs b/substrate/primitives/core/src/offchain/mod.rs index d3d2356b6ee8d53a2fe6e51dbee90bbf14eba202..d4e27fc6434846f3bd49c738e446c620d49a3db4 100644 --- a/substrate/primitives/core/src/offchain/mod.rs +++ b/substrate/primitives/core/src/offchain/mod.rs @@ -17,10 +17,13 @@ //! Offchain workers types -use codec::{Encode, Decode}; -use sp_std::{prelude::{Vec, Box}, convert::TryFrom}; use crate::{OpaquePeerId, RuntimeDebug}; -use sp_runtime_interface::pass_by::{PassByCodec, PassByInner, PassByEnum}; +use codec::{Decode, Encode}; +use sp_runtime_interface::pass_by::{PassByCodec, PassByEnum, PassByInner}; +use sp_std::{ + convert::TryFrom, + prelude::{Box, Vec}, +}; pub use crate::crypto::KeyTypeId; @@ -30,7 +33,7 @@ pub mod storage; pub mod testing; /// Persistent storage prefix used by the Offchain Worker API when creating a DB key. -pub const STORAGE_PREFIX : &[u8] = b"storage"; +pub const STORAGE_PREFIX: &[u8] = b"storage"; /// Offchain DB persistent (non-fork-aware) storage. pub trait OffchainStorage: Clone + Send + Sync { @@ -93,7 +96,9 @@ impl From for u32 { } /// Opaque type for offchain http requests. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, PassByInner)] +#[derive( + Clone, Copy, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, Encode, Decode, PassByInner, +)] #[cfg_attr(feature = "std", derive(Hash))] pub struct HttpRequestId(pub u16); @@ -123,7 +128,7 @@ impl TryFrom for HttpError { e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached), e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError), e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid), - _ => Err(()) + _ => Err(()), } } } @@ -202,11 +207,15 @@ impl OpaqueMultiaddr { } /// Opaque timestamp type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Timestamp(u64); /// Duration type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Duration(u64); impl Duration { @@ -290,11 +299,7 @@ impl Capabilities { /// Those calls should be allowed to sign and submit transactions /// and access offchain workers database (but read only!). pub fn rich_offchain_call() -> Self { - [ - Capability::TransactionPool, - Capability::Keystore, - Capability::OffchainDbRead, - ][..].into() + [Capability::TransactionPool, Capability::Keystore, Capability::OffchainDbRead][..].into() } /// Check if particular capability is enabled. @@ -345,12 +350,11 @@ pub trait Externalities: Send { /// Returns an error if: /// - No new request identifier could be allocated. /// - The method or URI contain invalid characters. 
- /// fn http_request_start( &mut self, method: &str, uri: &str, - meta: &[u8] + meta: &[u8], ) -> Result; /// Append header to the request. @@ -365,12 +369,11 @@ pub trait Externalities: Send { /// /// An error doesn't poison the request, and you can continue as if the call had never been /// made. - /// fn http_request_add_header( &mut self, request_id: HttpRequestId, name: &str, - value: &str + value: &str, ) -> Result<(), ()>; /// Write a chunk of request body. @@ -387,12 +390,11 @@ pub trait Externalities: Send { /// - The deadline is reached. /// - An I/O error has happened, for example the remote has closed our /// request. The request is then considered invalid. - /// fn http_request_write_body( &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError>; /// Block and wait for the responses for given requests. @@ -408,7 +410,7 @@ pub trait Externalities: Send { fn http_response_wait( &mut self, ids: &[HttpRequestId], - deadline: Option + deadline: Option, ) -> Vec; /// Read all response headers. @@ -420,10 +422,7 @@ pub trait Externalities: Send { /// /// Returns an empty list if the identifier is unknown/invalid, hasn't /// received a response, or has finished. - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)>; + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)>; /// Read a chunk of body response to given buffer. /// @@ -443,12 +442,11 @@ pub trait Externalities: Send { /// - The deadline is reached. /// - An I/O error has happened, for example the remote has closed our /// request. The request is then considered invalid. - /// fn http_response_read_body( &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result; /// Set the authorized nodes from runtime. 
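
Editor's note, for orientation only (the hunks above and below merely reformat these signatures): the low-level offchain HTTP trait is driven as start -> add headers -> write body -> wait -> read. A minimal sketch follows; the helper name `fetch_body`, the 1 KiB buffer, and the error mapping are illustrative assumptions, not part of sp-core, while the "empty chunk finalizes the request body" behaviour is the one documented above.

use sp_core::offchain::{Externalities, HttpError, HttpRequestStatus};
use sp_std::vec::Vec;

// Sketch: issue a GET request and drain the response body.
fn fetch_body(ext: &mut dyn Externalities, uri: &str) -> Result<Vec<u8>, HttpError> {
	let id = ext.http_request_start("GET", uri, &[]).map_err(|_| HttpError::Invalid)?;
	// Writing an empty chunk finalizes the request body.
	ext.http_request_write_body(id, &[], None)?;
	// Block until the response arrives (no deadline in this sketch).
	match ext.http_response_wait(&[id], None).as_slice() {
		[HttpRequestStatus::Finished(200)] => (),
		_ => return Err(HttpError::IoError),
	}
	let mut body = Vec::new();
	let mut buf = [0u8; 1024];
	loop {
		let n = ext.http_response_read_body(id, &mut buf, None)?;
		if n == 0 {
			break
		}
		body.extend_from_slice(&buf[..n]);
	}
	Ok(body)
}
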
@@ -466,11 +464,11 @@ pub trait Externalities: Send { impl Externalities for Box { fn is_validator(&self) -> bool { - (& **self).is_validator() + (&**self).is_validator() } fn network_state(&self) -> Result { - (& **self).network_state() + (&**self).network_state() } fn timestamp(&mut self) -> Timestamp { @@ -485,11 +483,21 @@ impl Externalities for Box { (&mut **self).random_seed() } - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { (&mut **self).http_request_start(method, uri, meta) } - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { (&mut **self).http_request_add_header(request_id, name, value) } @@ -497,12 +505,16 @@ impl Externalities for Box { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { (&mut **self).http_request_write_body(request_id, chunk, deadline) } - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { (&mut **self).http_response_wait(ids, deadline) } @@ -514,7 +526,7 @@ impl Externalities for Box { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { (&mut **self).http_response_read_body(request_id, buffer, deadline) } @@ -533,10 +545,7 @@ pub struct LimitedExternalities { impl LimitedExternalities { /// Create new externalities limited to given `capabilities`. pub fn new(capabilities: Capabilities, externalities: T) -> Self { - Self { - capabilities, - externalities, - } + Self { capabilities, externalities } } /// Check if given capability is allowed. 
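
Editor's note: `LimitedExternalities`, whose methods are reformatted in the surrounding hunks, forwards each call only after `check` confirms the matching capability, so callers narrow an existing externalities object at construction time. A rough sketch, assuming the `TestOffchainExt` helper from the testing module further down; the particular capability set is just one plausible combination:

use sp_core::offchain::{testing::TestOffchainExt, Capabilities, Capability, LimitedExternalities};

let (ext, _state) = TestOffchainExt::new();
// Grant only keystore and transaction-pool access; an HTTP call made
// through `limited` would then trip the `check(Capability::Http, ..)` guard.
let caps: Capabilities = [Capability::Keystore, Capability::TransactionPool][..].into();
let limited = LimitedExternalities::new(caps, ext);
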
@@ -575,12 +584,22 @@ impl Externalities for LimitedExternalities { self.externalities.random_seed() } - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { self.check(Capability::Http, "http_request_start"); self.externalities.http_request_start(method, uri, meta) } - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { self.check(Capability::Http, "http_request_add_header"); self.externalities.http_request_add_header(request_id, name, value) } @@ -589,13 +608,17 @@ impl Externalities for LimitedExternalities { &mut self, request_id: HttpRequestId, chunk: &[u8], - deadline: Option + deadline: Option, ) -> Result<(), HttpError> { self.check(Capability::Http, "http_request_write_body"); self.externalities.http_request_write_body(request_id, chunk, deadline) } - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { self.check(Capability::Http, "http_response_wait"); self.externalities.http_response_wait(ids, deadline) } @@ -609,7 +632,7 @@ impl Externalities for LimitedExternalities { &mut self, request_id: HttpRequestId, buffer: &mut [u8], - deadline: Option + deadline: Option, ) -> Result { self.check(Capability::Http, "http_response_read_body"); self.externalities.http_response_read_body(request_id, buffer, deadline) @@ -717,7 +740,8 @@ impl DbExternalities for LimitedExternalities { new_value: &[u8], ) -> bool { self.check(Capability::OffchainDbWrite, "local_storage_compare_and_set"); - self.externalities.local_storage_compare_and_set(kind, key, old_value, new_value) + self.externalities + .local_storage_compare_and_set(kind, key, old_value, new_value) } fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { diff --git a/substrate/primitives/core/src/offchain/storage.rs b/substrate/primitives/core/src/offchain/storage.rs index 4463c58ede5d50c3ab49e0a1364b5ee408ce25ce..ff72006cffd6047ac8b3056256707c85ccae3a59 100644 --- a/substrate/primitives/core/src/offchain/storage.rs +++ b/substrate/primitives/core/src/offchain/storage.rs @@ -17,9 +17,11 @@ //! In-memory implementation of offchain workers database. -use std::collections::hash_map::{HashMap, Entry}; use crate::offchain::OffchainStorage; -use std::iter::Iterator; +use std::{ + collections::hash_map::{Entry, HashMap}, + iter::Iterator, +}; /// In-memory storage for offchain workers. #[derive(Debug, Clone, Default)] @@ -29,12 +31,12 @@ pub struct InMemOffchainStorage { impl InMemOffchainStorage { /// Consume the offchain storage and iterate over all key value pairs. - pub fn into_iter(self) -> impl Iterator,Vec)> { + pub fn into_iter(self) -> impl Iterator, Vec)> { self.storage.into_iter() } /// Iterate over all key value pairs by reference. 
- pub fn iter(&self) -> impl Iterator,&Vec)> { + pub fn iter(&self) -> impl Iterator, &Vec)> { self.storage.iter() } @@ -71,10 +73,13 @@ impl OffchainStorage for InMemOffchainStorage { let key = prefix.iter().chain(key).cloned().collect(); match self.storage.entry(key) { - Entry::Vacant(entry) => if old_value.is_none() { - entry.insert(new_value.to_vec()); - true - } else { false }, + Entry::Vacant(entry) => + if old_value.is_none() { + entry.insert(new_value.to_vec()); + true + } else { + false + }, Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { entry.insert(new_value.to_vec()); true diff --git a/substrate/primitives/core/src/offchain/testing.rs b/substrate/primitives/core/src/offchain/testing.rs index 76c81d4b9bc6ce01398fe1e0e5a120bcf8c3c81a..ce88ece07da1d24f2954311968979fcd55f5970a 100644 --- a/substrate/primitives/core/src/offchain/testing.rs +++ b/substrate/primitives/core/src/offchain/testing.rs @@ -20,24 +20,18 @@ //! Namely all ExecutionExtensions that allow mocking //! the extra APIs. +use crate::{ + offchain::{ + self, storage::InMemOffchainStorage, HttpError, HttpRequestId as RequestId, + HttpRequestStatus as RequestStatus, OffchainOverlayedChange, OffchainStorage, + OpaqueNetworkState, StorageKind, Timestamp, TransactionPool, + }, + OpaquePeerId, +}; use std::{ collections::{BTreeMap, VecDeque}, sync::Arc, }; -use crate::OpaquePeerId; -use crate::offchain::{ - self, - OffchainOverlayedChange, - storage::InMemOffchainStorage, - HttpError, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - Timestamp, - StorageKind, - OpaqueNetworkState, - TransactionPool, - OffchainStorage, -}; use parking_lot::RwLock; @@ -75,9 +69,7 @@ impl TestPersistentOffchainDB { /// Create a new and empty offchain storage db for persistent items pub fn new() -> Self { - Self { - persistent: Arc::new(RwLock::new(InMemOffchainStorage::default())) - } + Self { persistent: Arc::new(RwLock::new(InMemOffchainStorage::default())) } } /// Apply a set of off-chain changes directly to the test backend @@ -88,7 +80,8 @@ impl TestPersistentOffchainDB { let mut me = self.persistent.write(); for ((_prefix, key), value_operation) in changes { match value_operation { - OffchainOverlayedChange::SetValue(val) => me.set(Self::PREFIX, key.as_slice(), val.as_slice()), + OffchainOverlayedChange::SetValue(val) => + me.set(Self::PREFIX, key.as_slice(), val.as_slice()), OffchainOverlayedChange::Remove => me.remove(Self::PREFIX, key.as_slice()), } } @@ -124,7 +117,6 @@ impl OffchainStorage for TestPersistentOffchainDB { } } - /// Internal state of the externalities. /// /// This can be used in tests to respond or assert stuff about interactions. @@ -151,20 +143,17 @@ impl OffchainState { id: u16, expected: PendingRequest, response: impl Into>, - response_headers: impl IntoIterator, + response_headers: impl IntoIterator, ) { match self.requests.get_mut(&RequestId(id)) { None => { panic!("Missing pending request: {:?}.\n\nAll: {:?}", id, self.requests); - } + }, Some(req) => { - assert_eq!( - *req, - expected, - ); + assert_eq!(*req, expected,); req.response = Some(response.into()); req.response_headers = response_headers.into_iter().collect(); - } + }, } } @@ -213,7 +202,9 @@ impl TestOffchainExt { } /// Create new `TestOffchainExt` and a reference to the internal state. 
- pub fn with_offchain_db(offchain_db: TestPersistentOffchainDB) -> (Self, Arc>) { + pub fn with_offchain_db( + offchain_db: TestPersistentOffchainDB, + ) -> (Self, Arc>) { let (ext, state) = Self::new(); ext.0.write().persistent_storage = offchain_db; (ext, state) @@ -226,10 +217,7 @@ impl offchain::Externalities for TestOffchainExt { } fn network_state(&self) -> Result { - Ok(OpaqueNetworkState { - peer_id: Default::default(), - external_addresses: vec![], - }) + Ok(OpaqueNetworkState { peer_id: Default::default(), external_addresses: vec![] }) } fn timestamp(&mut self) -> Timestamp { @@ -244,15 +232,23 @@ impl offchain::Externalities for TestOffchainExt { self.0.read().seed } - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { let mut state = self.0.write(); let id = RequestId(state.requests.len() as u16); - state.requests.insert(id, PendingRequest { - method: method.into(), - uri: uri.into(), - meta: meta.into(), - ..Default::default() - }); + state.requests.insert( + id, + PendingRequest { + method: method.into(), + uri: uri.into(), + meta: meta.into(), + ..Default::default() + }, + ); Ok(id) } @@ -275,7 +271,7 @@ impl offchain::Externalities for TestOffchainExt { &mut self, request_id: RequestId, chunk: &[u8], - _deadline: Option + _deadline: Option, ) -> Result<(), HttpError> { let mut state = self.0.write(); @@ -302,12 +298,14 @@ impl offchain::Externalities for TestOffchainExt { ) -> Vec { let state = self.0.read(); - ids.iter().map(|id| match state.requests.get(id) { - Some(req) if req.response.is_none() => - panic!("No `response` provided for request with id: {:?}", id), - None => RequestStatus::Invalid, - _ => RequestStatus::Finished(200), - }).collect() + ids.iter() + .map(|id| match state.requests.get(id) { + Some(req) if req.response.is_none() => + panic!("No `response` provided for request with id: {:?}", id), + None => RequestStatus::Invalid, + _ => RequestStatus::Finished(200), + }) + .collect() } fn http_response_headers(&mut self, request_id: RequestId) -> Vec<(Vec, Vec)> { @@ -327,11 +325,12 @@ impl offchain::Externalities for TestOffchainExt { &mut self, request_id: RequestId, buffer: &mut [u8], - _deadline: Option + _deadline: Option, ) -> Result { let mut state = self.0.write(); if let Some(req) = state.requests.get_mut(&request_id) { - let response = req.response + let response = req + .response .as_mut() .unwrap_or_else(|| panic!("No response provided for request: {:?}", request_id)); @@ -377,14 +376,14 @@ impl offchain::DbExternalities for TestOffchainExt { kind: StorageKind, key: &[u8], old_value: Option<&[u8]>, - new_value: &[u8] + new_value: &[u8], ) -> bool { let mut state = self.0.write(); match kind { - StorageKind::LOCAL => state.local_storage - .compare_and_set(b"", key, old_value, new_value), - StorageKind::PERSISTENT => state.persistent_storage - .compare_and_set(b"", key, old_value, new_value), + StorageKind::LOCAL => + state.local_storage.compare_and_set(b"", key, old_value, new_value), + StorageKind::PERSISTENT => + state.persistent_storage.compare_and_set(b"", key, old_value, new_value), } } diff --git a/substrate/primitives/core/src/sandbox.rs b/substrate/primitives/core/src/sandbox.rs index a15a7af4183130ad916f2815cce44e81baf2ab0c..acc3fda5e9b1781a4e4469f6b3ea97026508127d 100644 --- a/substrate/primitives/core/src/sandbox.rs +++ b/substrate/primitives/core/src/sandbox.rs @@ -17,17 +17,15 @@ //! 
Definition of a sandbox environment. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; /// Error error that can be returned from host function. -#[derive(Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Encode, Decode, crate::RuntimeDebug)] pub struct HostError; /// Describes an entity to define or import into the environment. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub enum ExternEntity { /// Function that is specified by an index in a default table of /// a module that creates the sandbox. @@ -44,8 +42,7 @@ pub enum ExternEntity { /// /// Each entry has a two-level name and description of an entity /// being defined. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct Entry { /// Module name of which corresponding entity being defined. pub module_name: Vec, @@ -56,8 +53,7 @@ pub struct Entry { } /// Definition of runtime that could be used by sandboxed code. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct EnvironmentDefinition { /// Vector of all entries in the environment definition. pub entries: Vec, @@ -91,8 +87,8 @@ pub const ERR_EXECUTION: u32 = -3i32 as u32; #[cfg(test)] mod tests { use super::*; - use std::fmt; use codec::Codec; + use std::fmt; fn roundtrip(s: S) { let encoded = s.encode(); @@ -101,28 +97,22 @@ mod tests { #[test] fn env_def_roundtrip() { - roundtrip(EnvironmentDefinition { - entries: vec![], - }); + roundtrip(EnvironmentDefinition { entries: vec![] }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"kernel"[..].into(), - field_name: b"memory"[..].into(), - entity: ExternEntity::Memory(1337), - }, - ], + entries: vec![Entry { + module_name: b"kernel"[..].into(), + field_name: b"memory"[..].into(), + entity: ExternEntity::Memory(1337), + }], }); roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"env"[..].into(), - field_name: b"abort"[..].into(), - entity: ExternEntity::Function(228), - }, - ], + entries: vec![Entry { + module_name: b"env"[..].into(), + field_name: b"abort"[..].into(), + entity: ExternEntity::Function(228), + }], }); } } diff --git a/substrate/primitives/core/src/sr25519.rs b/substrate/primitives/core/src/sr25519.rs index dbfb8ba1d26fdad31860cae8637fcd10e1433fdd..7e98bee96d83e11ad06f97c697caaab50b18b22c 100644 --- a/substrate/primitives/core/src/sr25519.rs +++ b/substrate/primitives/core/src/sr25519.rs @@ -21,34 +21,38 @@ //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. 
// end::description[] +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] -use sp_std::vec::Vec; +use crate::crypto::{DeriveJunction, Infallible, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use schnorrkel::{signing_context, ExpansionMode, Keypair, SecretKey, MiniSecretKey, PublicKey, - derive::{Derivation, ChainCode, CHAIN_CODE_LENGTH} +use schnorrkel::{ + derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, + signing_context, ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; +#[cfg(feature = "full_crypto")] +use sp_std::vec::Vec; #[cfg(feature = "std")] use std::convert::TryFrom; #[cfg(feature = "std")] use substrate_bip39::mini_secret_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{ - Pair as TraitPair, DeriveJunction, Infallible, SecretStringError -}; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; -use crate::hash::{H256, H512}; -use codec::{Encode, Decode, MaxEncodedLen}; +use crate::{ + crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, + UncheckedFrom, + }, + hash::{H256, H512}, +}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::ops::Deref; -#[cfg(feature = "std")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "full_crypto")] use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; +#[cfg(feature = "std")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; // signing context @@ -61,8 +65,7 @@ pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. #[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive( - PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner, - MaxEncodedLen, + PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner, MaxEncodedLen, )] pub struct Public(pub [u8; 32]); @@ -76,7 +79,7 @@ impl Clone for Pair { Pair(schnorrkel::Keypair { public: self.0.public, secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed") + .expect("key is always the correct size; qed"), }) } } @@ -176,14 +179,20 @@ impl sp_std::fmt::Debug for Public { #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&self.to_ss58check()) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { Public::from_ss58check(&String::deserialize(deserializer)?) 
.map_err(|e| de::Error::custom(format!("{:?}", e))) } @@ -211,14 +220,20 @@ impl sp_std::convert::TryFrom<&[u8]> for Signature { #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { serializer.serialize_str(&hex::encode(self)) } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { let signature_hex = hex::decode(&String::deserialize(deserializer)?) .map_err(|e| de::Error::custom(format!("{:?}", e)))?; Signature::try_from(signature_hex.as_ref()) @@ -350,7 +365,7 @@ impl Derive for Public { /// /// `None` if there are any hard junctions in there. #[cfg(feature = "std")] - fn derive>(&self, path: Iter) -> Option { + fn derive>(&self, path: Iter) -> Option { let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; for j in path { match j { @@ -471,8 +486,7 @@ impl TraitPair for Pair { /// /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]) - .expect("32 bytes can always build a key; qed") + Self::from_seed_slice(&seed[..]).expect("32 bytes can always build a key; qed") } /// Get the public key. @@ -488,21 +502,17 @@ impl TraitPair for Pair { /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() fn from_seed_slice(seed: &[u8]) -> Result { match seed.len() { - MINI_SECRET_KEY_LENGTH => { - Ok(Pair( - MiniSecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .expand_to_keypair(ExpansionMode::Ed25519) - )) - } - SECRET_KEY_LENGTH => { - Ok(Pair( - SecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .to_keypair() - )) - } - _ => Err(SecretStringError::InvalidSeedLength) + MINI_SECRET_KEY_LENGTH => Ok(Pair( + MiniSecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? + .expand_to_keypair(ExpansionMode::Ed25519), + )), + SECRET_KEY_LENGTH => Ok(Pair( + SecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? 
+ .to_keypair(), + )), + _ => Err(SecretStringError::InvalidSeedLength), } } #[cfg(feature = "std")] @@ -511,20 +521,20 @@ impl TraitPair for Pair { let phrase = mnemonic.phrase(); let (pair, seed) = Self::from_phrase(phrase, password) .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) + (pair, phrase.to_owned(), seed) } #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { Mnemonic::from_phrase(phrase, Language::English) .map_err(|_| SecretStringError::InvalidPhrase) .map(|m| Self::from_entropy(m.entropy(), password)) } - fn derive>(&self, + fn derive>( + &self, path: Iter, seed: Option, ) -> Result<(Pair, Option), Self::DeriveError> { @@ -532,17 +542,22 @@ impl TraitPair for Pair { if let Ok(msk) = MiniSecretKey::from_bytes(&s) { if msk.expand(ExpansionMode::Ed25519) == self.0.secret { Some(msk) - } else { None } - } else { None } - } else { None }; + } else { + None + } + } else { + None + } + } else { + None + }; let init = self.0.secret.clone(); let (result, seed) = path.fold((init, seed), |(acc, acc_seed), j| match (j, acc_seed) { - (DeriveJunction::Soft(cc), _) => - (acc.derived_key_simple(ChainCode(cc), &[]).0, None), + (DeriveJunction::Soft(cc), _) => (acc.derived_key_simple(ChainCode(cc), &[]).0, None), (DeriveJunction::Hard(cc), maybe_seed) => { let seed = derive_hard_junction(&acc, &cc); (seed.expand(ExpansionMode::Ed25519), maybe_seed.map(|_| seed)) - } + }, }); Ok((Self(result.into()), seed.map(|s| MiniSecretKey::to_bytes(&s)))) } @@ -596,9 +611,9 @@ impl Pair { // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets // that have not been upgraded and those that have. match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk.verify_simple_preaudit_deprecated( - SIGNING_CTX, message.as_ref(), &sig.0[..], - ).is_ok(), + Ok(pk) => pk + .verify_simple_preaudit_deprecated(SIGNING_CTX, message.as_ref(), &sig.0[..]) + .is_ok(), Err(_) => false, } } @@ -642,20 +657,16 @@ pub fn verify_batch( for signature in signatures { match schnorrkel::Signature::from_bytes(signature.as_ref()) { Ok(s) => sr_signatures.push(s), - Err(_) => return false + Err(_) => return false, }; } - let mut messages: Vec = messages.into_iter().map( - |msg| signing_context(SIGNING_CTX).bytes(msg) - ).collect(); + let mut messages: Vec = messages + .into_iter() + .map(|msg| signing_context(SIGNING_CTX).bytes(msg)) + .collect(); - schnorrkel::verify_batch( - &mut messages, - &sr_signatures, - &sr_pub_keys, - true, - ).is_ok() + schnorrkel::verify_batch(&mut messages, &sr_signatures, &sr_pub_keys, true).is_ok() } #[cfg(test)] @@ -685,7 +696,9 @@ mod compatibility_test { #[test] fn verify_known_old_message_should_work() { - let public = Public::from_raw(hex!("b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918")); + let public = Public::from_raw(hex!( + "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918" + )); // signature generated by the 1.1 version with the same ^^ public key. 
let signature = Signature::from_raw(hex!( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" @@ -699,7 +712,7 @@ mod compatibility_test { #[cfg(test)] mod test { use super::*; - use crate::crypto::{Ss58Codec, DEV_PHRASE, DEV_ADDRESS}; + use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; use hex_literal::hex; use serde_json; @@ -707,10 +720,14 @@ mod test { fn default_phrase_should_be_used() { assert_eq!( Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), ); assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).as_ref().map(Pair::public), + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .as_ref() + .map(Pair::public), Pair::from_string("/Alice", None).as_ref().map(Pair::public) ); } @@ -856,9 +873,9 @@ mod test { // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. // // This is to make sure that the wasm library is compatible. - let pk = Pair::from_seed( - &hex!("0000000000000000000000000000000000000000000000000000000000000000") - ); + let pk = Pair::from_seed(&hex!( + "0000000000000000000000000000000000000000000000000000000000000000" + )); let public = pk.public(); let js_signature = Signature::from_raw(hex!( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" diff --git a/substrate/primitives/core/src/testing.rs b/substrate/primitives/core/src/testing.rs index be1a83f1700957b68298602165b317ac18464907..865a03714a891a1026df4b9bc1212d26b4b6df26 100644 --- a/substrate/primitives/core/src/testing.rs +++ b/substrate/primitives/core/src/testing.rs @@ -162,7 +162,11 @@ impl crate::traits::SpawnNamed for TaskExecutor { #[cfg(feature = "std")] impl crate::traits::SpawnEssentialNamed for TaskExecutor { - fn spawn_essential_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + fn spawn_essential_blocking( + &self, + _: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ) { self.0.spawn_ok(future); } fn spawn_essential(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { diff --git a/substrate/primitives/core/src/traits.rs b/substrate/primitives/core/src/traits.rs index d6503cb86a05d1c91f71ccbb09a587fee0921ae2..dfa61f606cb9e658b8641e79705085652d1b76fb 100644 --- a/substrate/primitives/core/src/traits.rs +++ b/substrate/primitives/core/src/traits.rs @@ -99,11 +99,7 @@ impl<'a> RuntimeCode<'a> { /// /// This is only useful for tests that don't want to execute any code. pub fn empty() -> Self { - Self { - code_fetcher: &NoneFetchRuntimeCode, - hash: Vec::new(), - heap_pages: None, - } + Self { code_fetcher: &NoneFetchRuntimeCode, hash: Vec::new(), heap_pages: None } } } @@ -225,7 +221,11 @@ pub trait SpawnEssentialNamed: Clone + Send + Sync { /// Spawn the given blocking future. /// /// The given `name` is used to identify the future in tracing. - fn spawn_essential_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>); + fn spawn_essential_blocking( + &self, + name: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ); /// Spawn the given non-blocking future. /// /// The given `name` is used to identify the future in tracing. 
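
Editor's note: the `SpawnEssentialNamed` signatures reformatted above are easiest to read from the calling side. A small sketch, assuming the `TaskExecutor` test helper shown in the preceding testing.rs hunk exposes a `new()` constructor; the task name and body are illustrative, and by convention an essential task's exit is treated as a fatal condition by its supervisor:

use sp_core::{testing::TaskExecutor, traits::SpawnEssentialNamed};

let executor = TaskExecutor::new();
executor.spawn_essential(
	// The name identifies the future in tracing output.
	"example-essential-task",
	Box::pin(async {
		// Essential work; completion of this future signals failure upstream.
	}),
);
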
@@ -233,7 +233,11 @@ pub trait SpawnEssentialNamed: Clone + Send + Sync { } impl SpawnEssentialNamed for Box { - fn spawn_essential_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + fn spawn_essential_blocking( + &self, + name: &'static str, + future: futures::future::BoxFuture<'static, ()>, + ) { (**self).spawn_essential_blocking(name, future) } diff --git a/substrate/primitives/core/src/u32_trait.rs b/substrate/primitives/core/src/u32_trait.rs index 07f9bb00328324bf3c5802e9183e0aa2969ec6ce..37837e7c0548c7ceda9f6a84d4f7c61bb087b577 100644 --- a/substrate/primitives/core/src/u32_trait.rs +++ b/substrate/primitives/core/src/u32_trait.rs @@ -24,221 +24,547 @@ pub trait Value { } /// Type representing the value 0 for the `Value` trait. -pub struct _0; impl Value for _0 { const VALUE: u32 = 0; } +pub struct _0; +impl Value for _0 { + const VALUE: u32 = 0; +} /// Type representing the value 1 for the `Value` trait. -pub struct _1; impl Value for _1 { const VALUE: u32 = 1; } +pub struct _1; +impl Value for _1 { + const VALUE: u32 = 1; +} /// Type representing the value 2 for the `Value` trait. -pub struct _2; impl Value for _2 { const VALUE: u32 = 2; } +pub struct _2; +impl Value for _2 { + const VALUE: u32 = 2; +} /// Type representing the value 3 for the `Value` trait. -pub struct _3; impl Value for _3 { const VALUE: u32 = 3; } +pub struct _3; +impl Value for _3 { + const VALUE: u32 = 3; +} /// Type representing the value 4 for the `Value` trait. -pub struct _4; impl Value for _4 { const VALUE: u32 = 4; } +pub struct _4; +impl Value for _4 { + const VALUE: u32 = 4; +} /// Type representing the value 5 for the `Value` trait. -pub struct _5; impl Value for _5 { const VALUE: u32 = 5; } +pub struct _5; +impl Value for _5 { + const VALUE: u32 = 5; +} /// Type representing the value 6 for the `Value` trait. -pub struct _6; impl Value for _6 { const VALUE: u32 = 6; } +pub struct _6; +impl Value for _6 { + const VALUE: u32 = 6; +} /// Type representing the value 7 for the `Value` trait. -pub struct _7; impl Value for _7 { const VALUE: u32 = 7; } +pub struct _7; +impl Value for _7 { + const VALUE: u32 = 7; +} /// Type representing the value 8 for the `Value` trait. -pub struct _8; impl Value for _8 { const VALUE: u32 = 8; } +pub struct _8; +impl Value for _8 { + const VALUE: u32 = 8; +} /// Type representing the value 9 for the `Value` trait. -pub struct _9; impl Value for _9 { const VALUE: u32 = 9; } +pub struct _9; +impl Value for _9 { + const VALUE: u32 = 9; +} /// Type representing the value 10 for the `Value` trait. -pub struct _10; impl Value for _10 { const VALUE: u32 = 10; } +pub struct _10; +impl Value for _10 { + const VALUE: u32 = 10; +} /// Type representing the value 11 for the `Value` trait. -pub struct _11; impl Value for _11 { const VALUE: u32 = 11; } +pub struct _11; +impl Value for _11 { + const VALUE: u32 = 11; +} /// Type representing the value 12 for the `Value` trait. -pub struct _12; impl Value for _12 { const VALUE: u32 = 12; } +pub struct _12; +impl Value for _12 { + const VALUE: u32 = 12; +} /// Type representing the value 13 for the `Value` trait. -pub struct _13; impl Value for _13 { const VALUE: u32 = 13; } +pub struct _13; +impl Value for _13 { + const VALUE: u32 = 13; +} /// Type representing the value 14 for the `Value` trait. -pub struct _14; impl Value for _14 { const VALUE: u32 = 14; } +pub struct _14; +impl Value for _14 { + const VALUE: u32 = 14; +} /// Type representing the value 15 for the `Value` trait. 
-pub struct _15; impl Value for _15 { const VALUE: u32 = 15; } +pub struct _15; +impl Value for _15 { + const VALUE: u32 = 15; +} /// Type representing the value 16 for the `Value` trait. -pub struct _16; impl Value for _16 { const VALUE: u32 = 16; } +pub struct _16; +impl Value for _16 { + const VALUE: u32 = 16; +} /// Type representing the value 17 for the `Value` trait. -pub struct _17; impl Value for _17 { const VALUE: u32 = 17; } +pub struct _17; +impl Value for _17 { + const VALUE: u32 = 17; +} /// Type representing the value 18 for the `Value` trait. -pub struct _18; impl Value for _18 { const VALUE: u32 = 18; } +pub struct _18; +impl Value for _18 { + const VALUE: u32 = 18; +} /// Type representing the value 19 for the `Value` trait. -pub struct _19; impl Value for _19 { const VALUE: u32 = 19; } +pub struct _19; +impl Value for _19 { + const VALUE: u32 = 19; +} /// Type representing the value 20 for the `Value` trait. -pub struct _20; impl Value for _20 { const VALUE: u32 = 20; } +pub struct _20; +impl Value for _20 { + const VALUE: u32 = 20; +} /// Type representing the value 21 for the `Value` trait. -pub struct _21; impl Value for _21 { const VALUE: u32 = 21; } +pub struct _21; +impl Value for _21 { + const VALUE: u32 = 21; +} /// Type representing the value 22 for the `Value` trait. -pub struct _22; impl Value for _22 { const VALUE: u32 = 22; } +pub struct _22; +impl Value for _22 { + const VALUE: u32 = 22; +} /// Type representing the value 23 for the `Value` trait. -pub struct _23; impl Value for _23 { const VALUE: u32 = 23; } +pub struct _23; +impl Value for _23 { + const VALUE: u32 = 23; +} /// Type representing the value 24 for the `Value` trait. -pub struct _24; impl Value for _24 { const VALUE: u32 = 24; } +pub struct _24; +impl Value for _24 { + const VALUE: u32 = 24; +} /// Type representing the value 25 for the `Value` trait. -pub struct _25; impl Value for _25 { const VALUE: u32 = 25; } +pub struct _25; +impl Value for _25 { + const VALUE: u32 = 25; +} /// Type representing the value 26 for the `Value` trait. -pub struct _26; impl Value for _26 { const VALUE: u32 = 26; } +pub struct _26; +impl Value for _26 { + const VALUE: u32 = 26; +} /// Type representing the value 27 for the `Value` trait. -pub struct _27; impl Value for _27 { const VALUE: u32 = 27; } +pub struct _27; +impl Value for _27 { + const VALUE: u32 = 27; +} /// Type representing the value 28 for the `Value` trait. -pub struct _28; impl Value for _28 { const VALUE: u32 = 28; } +pub struct _28; +impl Value for _28 { + const VALUE: u32 = 28; +} /// Type representing the value 29 for the `Value` trait. -pub struct _29; impl Value for _29 { const VALUE: u32 = 29; } +pub struct _29; +impl Value for _29 { + const VALUE: u32 = 29; +} /// Type representing the value 30 for the `Value` trait. -pub struct _30; impl Value for _30 { const VALUE: u32 = 30; } +pub struct _30; +impl Value for _30 { + const VALUE: u32 = 30; +} /// Type representing the value 31 for the `Value` trait. -pub struct _31; impl Value for _31 { const VALUE: u32 = 31; } +pub struct _31; +impl Value for _31 { + const VALUE: u32 = 31; +} /// Type representing the value 32 for the `Value` trait. -pub struct _32; impl Value for _32 { const VALUE: u32 = 32; } +pub struct _32; +impl Value for _32 { + const VALUE: u32 = 32; +} /// Type representing the value 33 for the `Value` trait. 
-pub struct _33; impl Value for _33 { const VALUE: u32 = 33; } +pub struct _33; +impl Value for _33 { + const VALUE: u32 = 33; +} /// Type representing the value 34 for the `Value` trait. -pub struct _34; impl Value for _34 { const VALUE: u32 = 34; } +pub struct _34; +impl Value for _34 { + const VALUE: u32 = 34; +} /// Type representing the value 35 for the `Value` trait. -pub struct _35; impl Value for _35 { const VALUE: u32 = 35; } +pub struct _35; +impl Value for _35 { + const VALUE: u32 = 35; +} /// Type representing the value 36 for the `Value` trait. -pub struct _36; impl Value for _36 { const VALUE: u32 = 36; } +pub struct _36; +impl Value for _36 { + const VALUE: u32 = 36; +} /// Type representing the value 37 for the `Value` trait. -pub struct _37; impl Value for _37 { const VALUE: u32 = 37; } +pub struct _37; +impl Value for _37 { + const VALUE: u32 = 37; +} /// Type representing the value 38 for the `Value` trait. -pub struct _38; impl Value for _38 { const VALUE: u32 = 38; } +pub struct _38; +impl Value for _38 { + const VALUE: u32 = 38; +} /// Type representing the value 39 for the `Value` trait. -pub struct _39; impl Value for _39 { const VALUE: u32 = 39; } +pub struct _39; +impl Value for _39 { + const VALUE: u32 = 39; +} /// Type representing the value 40 for the `Value` trait. -pub struct _40; impl Value for _40 { const VALUE: u32 = 40; } +pub struct _40; +impl Value for _40 { + const VALUE: u32 = 40; +} /// Type representing the value 41 for the `Value` trait. -pub struct _41; impl Value for _41 { const VALUE: u32 = 41; } +pub struct _41; +impl Value for _41 { + const VALUE: u32 = 41; +} /// Type representing the value 42 for the `Value` trait. -pub struct _42; impl Value for _42 { const VALUE: u32 = 42; } +pub struct _42; +impl Value for _42 { + const VALUE: u32 = 42; +} /// Type representing the value 43 for the `Value` trait. -pub struct _43; impl Value for _43 { const VALUE: u32 = 43; } +pub struct _43; +impl Value for _43 { + const VALUE: u32 = 43; +} /// Type representing the value 44 for the `Value` trait. -pub struct _44; impl Value for _44 { const VALUE: u32 = 44; } +pub struct _44; +impl Value for _44 { + const VALUE: u32 = 44; +} /// Type representing the value 45 for the `Value` trait. -pub struct _45; impl Value for _45 { const VALUE: u32 = 45; } +pub struct _45; +impl Value for _45 { + const VALUE: u32 = 45; +} /// Type representing the value 46 for the `Value` trait. -pub struct _46; impl Value for _46 { const VALUE: u32 = 46; } +pub struct _46; +impl Value for _46 { + const VALUE: u32 = 46; +} /// Type representing the value 47 for the `Value` trait. -pub struct _47; impl Value for _47 { const VALUE: u32 = 47; } +pub struct _47; +impl Value for _47 { + const VALUE: u32 = 47; +} /// Type representing the value 48 for the `Value` trait. -pub struct _48; impl Value for _48 { const VALUE: u32 = 48; } +pub struct _48; +impl Value for _48 { + const VALUE: u32 = 48; +} /// Type representing the value 49 for the `Value` trait. -pub struct _49; impl Value for _49 { const VALUE: u32 = 49; } +pub struct _49; +impl Value for _49 { + const VALUE: u32 = 49; +} /// Type representing the value 50 for the `Value` trait. -pub struct _50; impl Value for _50 { const VALUE: u32 = 50; } +pub struct _50; +impl Value for _50 { + const VALUE: u32 = 50; +} /// Type representing the value 51 for the `Value` trait. 
-pub struct _51; impl Value for _51 { const VALUE: u32 = 51; } +pub struct _51; +impl Value for _51 { + const VALUE: u32 = 51; +} /// Type representing the value 52 for the `Value` trait. -pub struct _52; impl Value for _52 { const VALUE: u32 = 52; } +pub struct _52; +impl Value for _52 { + const VALUE: u32 = 52; +} /// Type representing the value 53 for the `Value` trait. -pub struct _53; impl Value for _53 { const VALUE: u32 = 53; } +pub struct _53; +impl Value for _53 { + const VALUE: u32 = 53; +} /// Type representing the value 54 for the `Value` trait. -pub struct _54; impl Value for _54 { const VALUE: u32 = 54; } +pub struct _54; +impl Value for _54 { + const VALUE: u32 = 54; +} /// Type representing the value 55 for the `Value` trait. -pub struct _55; impl Value for _55 { const VALUE: u32 = 55; } +pub struct _55; +impl Value for _55 { + const VALUE: u32 = 55; +} /// Type representing the value 56 for the `Value` trait. -pub struct _56; impl Value for _56 { const VALUE: u32 = 56; } +pub struct _56; +impl Value for _56 { + const VALUE: u32 = 56; +} /// Type representing the value 57 for the `Value` trait. -pub struct _57; impl Value for _57 { const VALUE: u32 = 57; } +pub struct _57; +impl Value for _57 { + const VALUE: u32 = 57; +} /// Type representing the value 58 for the `Value` trait. -pub struct _58; impl Value for _58 { const VALUE: u32 = 58; } +pub struct _58; +impl Value for _58 { + const VALUE: u32 = 58; +} /// Type representing the value 59 for the `Value` trait. -pub struct _59; impl Value for _59 { const VALUE: u32 = 59; } +pub struct _59; +impl Value for _59 { + const VALUE: u32 = 59; +} /// Type representing the value 60 for the `Value` trait. -pub struct _60; impl Value for _60 { const VALUE: u32 = 60; } +pub struct _60; +impl Value for _60 { + const VALUE: u32 = 60; +} /// Type representing the value 61 for the `Value` trait. -pub struct _61; impl Value for _61 { const VALUE: u32 = 61; } +pub struct _61; +impl Value for _61 { + const VALUE: u32 = 61; +} /// Type representing the value 62 for the `Value` trait. -pub struct _62; impl Value for _62 { const VALUE: u32 = 62; } +pub struct _62; +impl Value for _62 { + const VALUE: u32 = 62; +} /// Type representing the value 63 for the `Value` trait. -pub struct _63; impl Value for _63 { const VALUE: u32 = 63; } +pub struct _63; +impl Value for _63 { + const VALUE: u32 = 63; +} /// Type representing the value 64 for the `Value` trait. -pub struct _64; impl Value for _64 { const VALUE: u32 = 64; } +pub struct _64; +impl Value for _64 { + const VALUE: u32 = 64; +} /// Type representing the value 65 for the `Value` trait. -pub struct _65; impl Value for _65 { const VALUE: u32 = 65; } +pub struct _65; +impl Value for _65 { + const VALUE: u32 = 65; +} /// Type representing the value 66 for the `Value` trait. -pub struct _66; impl Value for _66 { const VALUE: u32 = 66; } +pub struct _66; +impl Value for _66 { + const VALUE: u32 = 66; +} /// Type representing the value 67 for the `Value` trait. -pub struct _67; impl Value for _67 { const VALUE: u32 = 67; } +pub struct _67; +impl Value for _67 { + const VALUE: u32 = 67; +} /// Type representing the value 68 for the `Value` trait. -pub struct _68; impl Value for _68 { const VALUE: u32 = 68; } +pub struct _68; +impl Value for _68 { + const VALUE: u32 = 68; +} /// Type representing the value 69 for the `Value` trait. 
-pub struct _69; impl Value for _69 { const VALUE: u32 = 69; } +pub struct _69; +impl Value for _69 { + const VALUE: u32 = 69; +} /// Type representing the value 70 for the `Value` trait. -pub struct _70; impl Value for _70 { const VALUE: u32 = 70; } +pub struct _70; +impl Value for _70 { + const VALUE: u32 = 70; +} /// Type representing the value 71 for the `Value` trait. -pub struct _71; impl Value for _71 { const VALUE: u32 = 71; } +pub struct _71; +impl Value for _71 { + const VALUE: u32 = 71; +} /// Type representing the value 72 for the `Value` trait. -pub struct _72; impl Value for _72 { const VALUE: u32 = 72; } +pub struct _72; +impl Value for _72 { + const VALUE: u32 = 72; +} /// Type representing the value 73 for the `Value` trait. -pub struct _73; impl Value for _73 { const VALUE: u32 = 73; } +pub struct _73; +impl Value for _73 { + const VALUE: u32 = 73; +} /// Type representing the value 74 for the `Value` trait. -pub struct _74; impl Value for _74 { const VALUE: u32 = 74; } +pub struct _74; +impl Value for _74 { + const VALUE: u32 = 74; +} /// Type representing the value 75 for the `Value` trait. -pub struct _75; impl Value for _75 { const VALUE: u32 = 75; } +pub struct _75; +impl Value for _75 { + const VALUE: u32 = 75; +} /// Type representing the value 76 for the `Value` trait. -pub struct _76; impl Value for _76 { const VALUE: u32 = 76; } +pub struct _76; +impl Value for _76 { + const VALUE: u32 = 76; +} /// Type representing the value 77 for the `Value` trait. -pub struct _77; impl Value for _77 { const VALUE: u32 = 77; } +pub struct _77; +impl Value for _77 { + const VALUE: u32 = 77; +} /// Type representing the value 78 for the `Value` trait. -pub struct _78; impl Value for _78 { const VALUE: u32 = 78; } +pub struct _78; +impl Value for _78 { + const VALUE: u32 = 78; +} /// Type representing the value 79 for the `Value` trait. -pub struct _79; impl Value for _79 { const VALUE: u32 = 79; } +pub struct _79; +impl Value for _79 { + const VALUE: u32 = 79; +} /// Type representing the value 80 for the `Value` trait. -pub struct _80; impl Value for _80 { const VALUE: u32 = 80; } +pub struct _80; +impl Value for _80 { + const VALUE: u32 = 80; +} /// Type representing the value 81 for the `Value` trait. -pub struct _81; impl Value for _81 { const VALUE: u32 = 81; } +pub struct _81; +impl Value for _81 { + const VALUE: u32 = 81; +} /// Type representing the value 82 for the `Value` trait. -pub struct _82; impl Value for _82 { const VALUE: u32 = 82; } +pub struct _82; +impl Value for _82 { + const VALUE: u32 = 82; +} /// Type representing the value 83 for the `Value` trait. -pub struct _83; impl Value for _83 { const VALUE: u32 = 83; } +pub struct _83; +impl Value for _83 { + const VALUE: u32 = 83; +} /// Type representing the value 84 for the `Value` trait. -pub struct _84; impl Value for _84 { const VALUE: u32 = 84; } +pub struct _84; +impl Value for _84 { + const VALUE: u32 = 84; +} /// Type representing the value 85 for the `Value` trait. -pub struct _85; impl Value for _85 { const VALUE: u32 = 85; } +pub struct _85; +impl Value for _85 { + const VALUE: u32 = 85; +} /// Type representing the value 86 for the `Value` trait. -pub struct _86; impl Value for _86 { const VALUE: u32 = 86; } +pub struct _86; +impl Value for _86 { + const VALUE: u32 = 86; +} /// Type representing the value 87 for the `Value` trait. 
-pub struct _87; impl Value for _87 { const VALUE: u32 = 87; } +pub struct _87; +impl Value for _87 { + const VALUE: u32 = 87; +} /// Type representing the value 88 for the `Value` trait. -pub struct _88; impl Value for _88 { const VALUE: u32 = 88; } +pub struct _88; +impl Value for _88 { + const VALUE: u32 = 88; +} /// Type representing the value 89 for the `Value` trait. -pub struct _89; impl Value for _89 { const VALUE: u32 = 89; } +pub struct _89; +impl Value for _89 { + const VALUE: u32 = 89; +} /// Type representing the value 90 for the `Value` trait. -pub struct _90; impl Value for _90 { const VALUE: u32 = 90; } +pub struct _90; +impl Value for _90 { + const VALUE: u32 = 90; +} /// Type representing the value 91 for the `Value` trait. -pub struct _91; impl Value for _91 { const VALUE: u32 = 91; } +pub struct _91; +impl Value for _91 { + const VALUE: u32 = 91; +} /// Type representing the value 92 for the `Value` trait. -pub struct _92; impl Value for _92 { const VALUE: u32 = 92; } +pub struct _92; +impl Value for _92 { + const VALUE: u32 = 92; +} /// Type representing the value 93 for the `Value` trait. -pub struct _93; impl Value for _93 { const VALUE: u32 = 93; } +pub struct _93; +impl Value for _93 { + const VALUE: u32 = 93; +} /// Type representing the value 94 for the `Value` trait. -pub struct _94; impl Value for _94 { const VALUE: u32 = 94; } +pub struct _94; +impl Value for _94 { + const VALUE: u32 = 94; +} /// Type representing the value 95 for the `Value` trait. -pub struct _95; impl Value for _95 { const VALUE: u32 = 95; } +pub struct _95; +impl Value for _95 { + const VALUE: u32 = 95; +} /// Type representing the value 96 for the `Value` trait. -pub struct _96; impl Value for _96 { const VALUE: u32 = 96; } +pub struct _96; +impl Value for _96 { + const VALUE: u32 = 96; +} /// Type representing the value 97 for the `Value` trait. -pub struct _97; impl Value for _97 { const VALUE: u32 = 97; } +pub struct _97; +impl Value for _97 { + const VALUE: u32 = 97; +} /// Type representing the value 98 for the `Value` trait. -pub struct _98; impl Value for _98 { const VALUE: u32 = 98; } +pub struct _98; +impl Value for _98 { + const VALUE: u32 = 98; +} /// Type representing the value 99 for the `Value` trait. -pub struct _99; impl Value for _99 { const VALUE: u32 = 99; } +pub struct _99; +impl Value for _99 { + const VALUE: u32 = 99; +} /// Type representing the value 100 for the `Value` trait. -pub struct _100; impl Value for _100 { const VALUE: u32 = 100; } +pub struct _100; +impl Value for _100 { + const VALUE: u32 = 100; +} /// Type representing the value 112 for the `Value` trait. -pub struct _112; impl Value for _112 { const VALUE: u32 = 112; } +pub struct _112; +impl Value for _112 { + const VALUE: u32 = 112; +} /// Type representing the value 128 for the `Value` trait. -pub struct _128; impl Value for _128 { const VALUE: u32 = 128; } +pub struct _128; +impl Value for _128 { + const VALUE: u32 = 128; +} /// Type representing the value 160 for the `Value` trait. -pub struct _160; impl Value for _160 { const VALUE: u32 = 160; } +pub struct _160; +impl Value for _160 { + const VALUE: u32 = 160; +} /// Type representing the value 192 for the `Value` trait. -pub struct _192; impl Value for _192 { const VALUE: u32 = 192; } +pub struct _192; +impl Value for _192 { + const VALUE: u32 = 192; +} /// Type representing the value 224 for the `Value` trait. 
-pub struct _224; impl Value for _224 { const VALUE: u32 = 224; } +pub struct _224; +impl Value for _224 { + const VALUE: u32 = 224; +} /// Type representing the value 256 for the `Value` trait. -pub struct _256; impl Value for _256 { const VALUE: u32 = 256; } +pub struct _256; +impl Value for _256 { + const VALUE: u32 = 256; +} /// Type representing the value 384 for the `Value` trait. -pub struct _384; impl Value for _384 { const VALUE: u32 = 384; } +pub struct _384; +impl Value for _384 { + const VALUE: u32 = 384; +} /// Type representing the value 512 for the `Value` trait. -pub struct _512; impl Value for _512 { const VALUE: u32 = 512; } - +pub struct _512; +impl Value for _512 { + const VALUE: u32 = 512; +} diff --git a/substrate/primitives/core/src/uint.rs b/substrate/primitives/core/src/uint.rs index ff45ad6ecf0d510289a70cffc8bc196545e9f0e3..a74980332ad28dd0e068f3b972646d9e29cc2b0b 100644 --- a/substrate/primitives/core/src/uint.rs +++ b/substrate/primitives/core/src/uint.rs @@ -22,7 +22,7 @@ pub use primitive_types::{U256, U512}; #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; use sp_serializer as ser; macro_rules! test { @@ -55,34 +55,27 @@ mod tests { assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); } - } + }; } test!(U256, test_u256); #[test] fn test_u256_codec() { - let res1 = vec![120, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0]; - let res2 = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; + let res1 = vec![ + 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]; + let res2 = vec![ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + ]; - assert_eq!( - U256::from(120).encode(), - res1); - assert_eq!( - U256::max_value().encode(), - res2); - assert_eq!( - U256::decode(&mut &res1[..]), - Ok(U256::from(120))); - assert_eq!( - U256::decode(&mut &res2[..]), - Ok(U256::max_value())); + assert_eq!(U256::from(120).encode(), res1); + assert_eq!(U256::max_value().encode(), res2); + assert_eq!(U256::decode(&mut &res1[..]), Ok(U256::from(120))); + assert_eq!(U256::decode(&mut &res2[..]), Ok(U256::max_value())); } #[test] @@ -91,10 +84,10 @@ mod tests { ser::to_string_pretty(&!U256::zero()), "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") - .unwrap_err() - .is_data() - ); + assert!(ser::from_str::( + "\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap_err() + .is_data()); } } diff --git a/substrate/primitives/database/src/kvdb.rs b/substrate/primitives/database/src/kvdb.rs index d99fe6360ef7b7585c7c37d999be3865d6501b93..1a2b0513dc28adc00d08bf15434bf2d264c2f06b 100644 --- a/substrate/primitives/database/src/kvdb.rs +++ b/substrate/primitives/database/src/kvdb.rs @@ -16,30 +16,31 @@ // limitations under the License. 
/// A wrapper around `kvdb::Database` that implements `sp_database::Database` trait - use ::kvdb::{DBTransaction, KeyValueDB}; -use crate::{Database, Change, ColumnId, Transaction, error}; +use crate::{error, Change, ColumnId, Database, Transaction}; struct DbAdapter(D); fn handle_err(result: std::io::Result) -> T { match result { Ok(r) => r, - Err(e) => { + Err(e) => { panic!("Critical database error: {:?}", e); - } + }, } } /// Wrap RocksDb database into a trait object that implements `sp_database::Database` pub fn as_database(db: D) -> std::sync::Arc> - where D: KeyValueDB + 'static, H: Clone + AsRef<[u8]> +where + D: KeyValueDB + 'static, + H: Clone + AsRef<[u8]>, { std::sync::Arc::new(DbAdapter(db)) } -impl DbAdapter { +impl DbAdapter { // Returns counter key and counter value if it exists. fn read_counter(&self, col: ColumnId, key: &[u8]) -> error::Result<(Vec, Option)> { // Add a key suffix for the counter @@ -49,16 +50,16 @@ impl DbAdapter { Some(data) => { let mut counter_data = [0; 4]; if data.len() != 4 { - return Err(error::DatabaseError(Box::new( - std::io::Error::new(std::io::ErrorKind::Other, - format!("Unexpected counter len {}", data.len()))) - )) + return Err(error::DatabaseError(Box::new(std::io::Error::new( + std::io::ErrorKind::Other, + format!("Unexpected counter len {}", data.len()), + )))) } counter_data.copy_from_slice(&data); let counter = u32::from_le_bytes(counter_data); (counter_key, Some(counter)) }, - None => (counter_key, None) + None => (counter_key, None), }) } } @@ -70,27 +71,29 @@ impl> Database for DbAdapter { match change { Change::Set(col, key, value) => tx.put_vec(col, &key, value), Change::Remove(col, key) => tx.delete(col, &key), - Change::Store(col, key, value) => { - match self.read_counter(col, key.as_ref())? { - (counter_key, Some(mut counter)) => { - counter += 1; - tx.put(col, &counter_key, &counter.to_le_bytes()); - }, - (counter_key, None) => { - let d = 1u32.to_le_bytes(); - tx.put(col, &counter_key, &d); - tx.put_vec(col, key.as_ref(), value); - }, - } - } + Change::Store(col, key, value) => match self.read_counter(col, key.as_ref())? { + (counter_key, Some(mut counter)) => { + counter += 1; + tx.put(col, &counter_key, &counter.to_le_bytes()); + }, + (counter_key, None) => { + let d = 1u32.to_le_bytes(); + tx.put(col, &counter_key, &d); + tx.put_vec(col, key.as_ref(), value); + }, + }, Change::Reference(col, key) => { - if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? { + if let (counter_key, Some(mut counter)) = + self.read_counter(col, key.as_ref())? + { counter += 1; tx.put(col, &counter_key, &counter.to_le_bytes()); } - } + }, Change::Release(col, key) => { - if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? { + if let (counter_key, Some(mut counter)) = + self.read_counter(col, key.as_ref())? + { counter -= 1; if counter == 0 { tx.delete(col, &counter_key); @@ -99,7 +102,7 @@ impl> Database for DbAdapter { tx.put(col, &counter_key, &counter.to_le_bytes()); } } - } + }, } } self.0.write(tx).map_err(|e| error::DatabaseError(Box::new(e))) diff --git a/substrate/primitives/database/src/lib.rs b/substrate/primitives/database/src/lib.rs index 1fa0c8e49b01510c203d256d4b6e35bb79baa9f5..ed5d93ed5b9c65cf69fa0719096d8bbaa2840fec 100644 --- a/substrate/primitives/database/src/lib.rs +++ b/substrate/primitives/database/src/lib.rs @@ -18,11 +18,11 @@ //! The main database trait, allowing Substrate to store data persistently. 
pub mod error; -mod mem; mod kvdb; +mod mem; -pub use mem::MemDb; pub use crate::kvdb::as_database; +pub use mem::MemDb; /// An identifier for a column. pub type ColumnId = u32; @@ -118,10 +118,13 @@ impl std::fmt::Debug for dyn Database { pub fn with_get>( db: &dyn Database, col: ColumnId, - key: &[u8], mut f: impl FnMut(&[u8]) -> R + key: &[u8], + mut f: impl FnMut(&[u8]) -> R, ) -> Option { let mut result: Option = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; + let mut adapter = |k: &_| { + result = Some(f(k)); + }; db.with_get(col, key, &mut adapter); result } diff --git a/substrate/primitives/database/src/mem.rs b/substrate/primitives/database/src/mem.rs index 24ddf03319711e84b626a276fc3beb0cd1574548..d1b1861e98fdd11e1f82967cb1b82ab8e7494cd4 100644 --- a/substrate/primitives/database/src/mem.rs +++ b/substrate/primitives/database/src/mem.rs @@ -17,41 +17,52 @@ //! In-memory implementation of `Database` -use std::collections::{HashMap, hash_map::Entry}; -use crate::{Database, Change, ColumnId, Transaction, error}; +use crate::{error, Change, ColumnId, Database, Transaction}; use parking_lot::RwLock; +use std::collections::{hash_map::Entry, HashMap}; #[derive(Default)] /// This implements `Database` as an in-memory hash map. `commit` is not atomic. pub struct MemDb(RwLock, (u32, Vec)>>>); impl Database for MemDb - where H: Clone + AsRef<[u8]> +where + H: Clone + AsRef<[u8]>, { fn commit(&self, transaction: Transaction) -> error::Result<()> { let mut s = self.0.write(); for change in transaction.0.into_iter() { match change { - Change::Set(col, key, value) => { s.entry(col).or_default().insert(key, (1, value)); }, - Change::Remove(col, key) => { s.entry(col).or_default().remove(&key); }, + Change::Set(col, key, value) => { + s.entry(col).or_default().insert(key, (1, value)); + }, + Change::Remove(col, key) => { + s.entry(col).or_default().remove(&key); + }, Change::Store(col, hash, value) => { - s.entry(col).or_default().entry(hash.as_ref().to_vec()) + s.entry(col) + .or_default() + .entry(hash.as_ref().to_vec()) .and_modify(|(c, _)| *c += 1) .or_insert_with(|| (1, value)); }, Change::Reference(col, hash) => { - if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { + if let Entry::Occupied(mut entry) = + s.entry(col).or_default().entry(hash.as_ref().to_vec()) + { entry.get_mut().0 += 1; } - } + }, Change::Release(col, hash) => { - if let Entry::Occupied(mut entry) = s.entry(col).or_default().entry(hash.as_ref().to_vec()) { + if let Entry::Occupied(mut entry) = + s.entry(col).or_default().entry(hash.as_ref().to_vec()) + { entry.get_mut().0 -= 1; if entry.get().0 == 0 { entry.remove(); } } - } + }, } } @@ -76,4 +87,3 @@ impl MemDb { s.get(&col).map(|c| c.len()).unwrap_or(0) } } - diff --git a/substrate/primitives/debug-derive/src/impls.rs b/substrate/primitives/debug-derive/src/impls.rs index 898e4eef5d06bb0fcea027e5ea3051360784c087..4d79ee9880160a0cfc2bf64a049ae37ec29da0cb 100644 --- a/substrate/primitives/debug-derive/src/impls.rs +++ b/substrate/primitives/debug-derive/src/impls.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use quote::quote; use proc_macro2::TokenStream; -use syn::{Data, DeriveInput, parse_quote}; +use quote::quote; +use syn::{parse_quote, Data, DeriveInput}; pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { let name_str = ast.ident.to_string(); @@ -28,11 +28,11 @@ pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { let wh = generics.make_where_clause(); for t in ast.generics.type_params() { let name = &t.ident; - wh.predicates.push(parse_quote!{ #name : core::fmt::Debug }); + wh.predicates.push(parse_quote! { #name : core::fmt::Debug }); } generics.split_for_impl() }; - let gen = quote!{ + let gen = quote! { impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { #implementation @@ -62,32 +62,26 @@ mod implementation { mod implementation { use super::*; use proc_macro2::Span; - use syn::{Ident, Index, token::SelfValue}; + use syn::{token::SelfValue, Ident, Index}; /// Derive the inner implementation of `Debug::fmt` function. pub fn derive(name_str: &str, data: &Data) -> TokenStream { match *data { Data::Struct(ref s) => derive_struct(&name_str, &s.fields), - Data::Union(ref u) => derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)), + Data::Union(ref u) => + derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)), Data::Enum(ref e) => derive_enum(&name_str, &e), } } enum Fields { - Indexed { - indices: Vec, - }, - Unnamed { - vars: Vec, - }, - Named { - names: Vec, - this: Option, - }, + Indexed { indices: Vec }, + Unnamed { vars: Vec }, + Named { names: Vec, this: Option }, } impl Fields { - fn new<'a>(fields: impl Iterator, this: Option) -> Self { + fn new<'a>(fields: impl Iterator, this: Option) -> Self { let mut indices = vec![]; let mut names = vec![]; @@ -100,27 +94,17 @@ mod implementation { } if names.is_empty() { - Self::Indexed { - indices, - } + Self::Indexed { indices } } else { - Self::Named { - names, - this, - } + Self::Named { names, this } } } } - fn derive_fields<'a>( - name_str: &str, - fields: Fields, - ) -> TokenStream { + fn derive_fields<'a>(name_str: &str, fields: Fields) -> TokenStream { match fields { Fields::Named { names, this } => { - let names_str: Vec<_> = names.iter() - .map(|x| x.to_string()) - .collect(); + let names_str: Vec<_> = names.iter().map(|x| x.to_string()).collect(); let fields = match this { None => quote! { #( .field(#names_str, #names) )* }, @@ -132,16 +116,15 @@ mod implementation { #fields .finish() } - }, - Fields::Indexed { indices } => { + Fields::Indexed { indices } => { quote! { fmt.debug_tuple(#name_str) #( .field(&self.#indices) )* .finish() } }, - Fields::Unnamed { vars } => { + Fields::Unnamed { vars } => { quote! 
{ fmt.debug_tuple(#name_str) #( .field(#vars) )* @@ -151,38 +134,33 @@ mod implementation { } } - fn derive_enum( - name: &str, - e: &syn::DataEnum, - ) -> TokenStream { - let v = e.variants - .iter() - .map(|v| { - let name = format!("{}::{}", name, v.ident); - let ident = &v.ident; - match v.fields { - syn::Fields::Named(ref f) => { - let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); - let fields_impl = derive_fields(&name, Fields::Named { - names: names.clone(), - this: None, - }); - (ident, (quote!{ { #( ref #names ),* } }, fields_impl)) - }, - syn::Fields::Unnamed(ref f) => { - let names = f.unnamed.iter() - .enumerate() - .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) - .collect::>(); - let fields_impl = derive_fields(&name, Fields::Unnamed { vars: names.clone() }); - (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) - }, - syn::Fields::Unit => { - let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); - (ident, (quote! { }, fields_impl)) - }, - } - }); + fn derive_enum(name: &str, e: &syn::DataEnum) -> TokenStream { + let v = e.variants.iter().map(|v| { + let name = format!("{}::{}", name, v.ident); + let ident = &v.ident; + match v.fields { + syn::Fields::Named(ref f) => { + let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); + let fields_impl = + derive_fields(&name, Fields::Named { names: names.clone(), this: None }); + (ident, (quote! { { #( ref #names ),* } }, fields_impl)) + }, + syn::Fields::Unnamed(ref f) => { + let names = f + .unnamed + .iter() + .enumerate() + .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) + .collect::>(); + let fields_impl = derive_fields(&name, Fields::Unnamed { vars: names.clone() }); + (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) + }, + syn::Fields::Unit => { + let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); + (ident, (quote! 
{}, fields_impl)) + }, + } + }); type Vecs = (Vec, Vec); let (variants, others): Vecs<_, _> = v.unzip(); @@ -196,23 +174,15 @@ mod implementation { } } - fn derive_struct( - name_str: &str, - fields: &syn::Fields, - ) -> TokenStream { + fn derive_struct(name_str: &str, fields: &syn::Fields) -> TokenStream { match *fields { syn::Fields::Named(ref f) => derive_fields( name_str, Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), ), - syn::Fields::Unnamed(ref f) => derive_fields( - name_str, - Fields::new(f.unnamed.iter(), None), - ), - syn::Fields::Unit => derive_fields( - name_str, - Fields::Indexed { indices: vec![] }, - ), + syn::Fields::Unnamed(ref f) => + derive_fields(name_str, Fields::new(f.unnamed.iter(), None)), + syn::Fields::Unit => derive_fields(name_str, Fields::Indexed { indices: vec![] }), } } } diff --git a/substrate/primitives/debug-derive/src/lib.rs b/substrate/primitives/debug-derive/src/lib.rs index ebfbd614d9c8dcc6e99c6aa1109073419f55db31..7eaa3a0020e9325bb9761905f93030b592d438e8 100644 --- a/substrate/primitives/debug-derive/src/lib.rs +++ b/substrate/primitives/debug-derive/src/lib.rs @@ -38,6 +38,5 @@ use proc_macro::TokenStream; #[proc_macro_derive(RuntimeDebug)] pub fn debug_derive(input: TokenStream) -> TokenStream { - impls::debug_derive(syn::parse_macro_input!(input)) + impls::debug_derive(syn::parse_macro_input!(input)) } - diff --git a/substrate/primitives/debug-derive/tests/tests.rs b/substrate/primitives/debug-derive/tests/tests.rs index d51d6a05bf21c0ae220c7abedd48807060fe2ed0..4f4c7f4caabc2c8df3f496d48a941e4894578ce9 100644 --- a/substrate/primitives/debug-derive/tests/tests.rs +++ b/substrate/primitives/debug-derive/tests/tests.rs @@ -30,33 +30,17 @@ struct Named { enum EnumLongName { A, B(A, String), - VariantLongName { - a: A, - b: String, - }, + VariantLongName { a: A, b: String }, } - #[test] fn should_display_proper_debug() { use self::EnumLongName as Enum; - assert_eq!( - format!("{:?}", Unnamed(1, "abc".into())), - "Unnamed(1, \"abc\")" - ); - assert_eq!( - format!("{:?}", Named { a: 1, b: "abc".into() }), - "Named { a: 1, b: \"abc\" }" - ); - assert_eq!( - format!("{:?}", Enum::::A), - "EnumLongName::A" - ); - assert_eq!( - format!("{:?}", Enum::B(1, "abc".into())), - "EnumLongName::B(1, \"abc\")" - ); + assert_eq!(format!("{:?}", Unnamed(1, "abc".into())), "Unnamed(1, \"abc\")"); + assert_eq!(format!("{:?}", Named { a: 1, b: "abc".into() }), "Named { a: 1, b: \"abc\" }"); + assert_eq!(format!("{:?}", Enum::::A), "EnumLongName::A"); + assert_eq!(format!("{:?}", Enum::B(1, "abc".into())), "EnumLongName::B(1, \"abc\")"); assert_eq!( format!("{:?}", Enum::VariantLongName { a: 1, b: "abc".into() }), "EnumLongName::VariantLongName { a: 1, b: \"abc\" }" diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 69c6c09be448714668a1aaa7c27eff9ed6a7215b..55b69fde089038fe6724600f96b662f2610c5063 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -22,10 +22,13 @@ //! //! It is required that each extension implements the [`Extension`] trait. +use crate::Error; use sp_std::{ - collections::btree_map::{BTreeMap, Entry}, any::{Any, TypeId}, ops::DerefMut, boxed::Box, + any::{Any, TypeId}, + boxed::Box, + collections::btree_map::{BTreeMap, Entry}, + ops::DerefMut, }; -use crate::Error; /// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) extension. 
/// @@ -101,7 +104,11 @@ pub trait ExtensionStore { /// Register extension `extension` with specified `type_id`. /// /// It should return an error if the extension is already registered. - fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box<dyn Extension>) -> Result<(), Error>; + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box<dyn Extension>, + ) -> Result<(), Error>; /// Deregister extension with specified `type_id` and drop it. /// @@ -129,10 +136,7 @@ impl Extensions { } /// Register the given extension. - pub fn register<E: Extension>( - &mut self, - ext: E, - ) { + pub fn register<E: Extension>(&mut self, ext: E) { let type_id = ext.type_id(); self.extensions.insert(type_id, Box::new(ext)); } @@ -154,7 +158,10 @@ impl Extensions { /// Return a mutable reference to the requested extension. pub fn get_mut(&mut self, ext_type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(&ext_type_id).map(DerefMut::deref_mut).map(Extension::as_mut_any) + self.extensions + .get_mut(&ext_type_id) + .map(DerefMut::deref_mut) + .map(Extension::as_mut_any) } /// Deregister extension for the given `type_id`. @@ -165,7 +172,9 @@ impl Extensions { } /// Returns a mutable iterator over all extensions. - pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = (&TypeId, &mut Box<dyn Extension>)> { + pub fn iter_mut<'a>( + &'a mut self, + ) -> impl Iterator<Item = (&TypeId, &mut Box<dyn Extension>)> { self.extensions.iter_mut() } } diff --git a/substrate/primitives/externalities/src/lib.rs b/substrate/primitives/externalities/src/lib.rs index 80bb5b99f31559862deccc75805f5b8a856034f7..b0ec16213b2c25f6feccf9b8374ee44e22df9a0f 100644 --- a/substrate/primitives/externalities/src/lib.rs +++ b/substrate/primitives/externalities/src/lib.rs @@ -25,12 +25,16 @@ //! //! This crate exposes the main [`Externalities`] trait. -use sp_std::{any::{Any, TypeId}, vec::Vec, boxed::Box}; +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + vec::Vec, +}; use sp_storage::{ChildInfo, TrackedStorageKey}; +pub use extensions::{Extension, ExtensionStore, Extensions}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; -pub use extensions::{Extension, Extensions, ExtensionStore}; mod extensions; mod scope_limited; @@ -68,20 +72,12 @@ pub trait Externalities: ExtensionStore { /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option<Vec<u8>>; + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>>; /// Read child runtime storage. /// /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option<Vec<u8>>; + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>>; /// Set storage entry `key` of current contract being called (effective immediately). fn set_storage(&mut self, key: Vec<u8>, value: Vec<u8>) { @@ -89,12 +85,7 @@ pub trait Externalities: ExtensionStore { } /// Set child storage entry `key` of current contract being called (effective immediately). - fn set_child_storage( - &mut self, - child_info: &ChildInfo, - key: Vec<u8>, - value: Vec<u8>, - ) { + fn set_child_storage(&mut self, child_info: &ChildInfo, key: Vec<u8>, value: Vec<u8>) { self.place_child_storage(child_info, key, Some(value)) } @@ -104,11 +95,7 @@ pub trait Externalities: ExtensionStore { } /// Clear a child storage entry (`key`) of current contract being called (effective immediately).
- fn clear_child_storage( - &mut self, - child_info: &ChildInfo, - key: &[u8], - ) { + fn clear_child_storage(&mut self, child_info: &ChildInfo, key: &[u8]) { self.place_child_storage(child_info, key.to_vec(), None) } @@ -118,11 +105,7 @@ } /// Whether a child storage entry exists. - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> bool { + fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { self.child_storage(child_info, key).is_some() } @@ -130,11 +113,7 @@ fn next_storage_key(&self, key: &[u8]) -> Option<Vec<u8>>; /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option<Vec<u8>>; + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>>; /// Clear an entire child storage. /// @@ -169,12 +148,7 @@ fn place_storage(&mut self, key: Vec<u8>, value: Option<Vec<u8>>); /// Set or clear a child storage entry. - fn place_child_storage( - &mut self, - child_info: &ChildInfo, - key: Vec<u8>, - value: Option<Vec<u8>>, - ); + fn place_child_storage(&mut self, child_info: &ChildInfo, key: Vec<u8>, value: Option<Vec<u8>>); /// Get the trie root of the current storage map. /// @@ -189,19 +163,12 @@ /// /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec<u8>; + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec<u8>; /// Append storage item. /// /// This assumes a specific format of the storage item. Also, there is no way to undo this operation. - fn storage_append( - &mut self, - key: Vec<u8>, - value: Vec<u8>, - ); + fn storage_append(&mut self, key: Vec<u8>, value: Vec<u8>); /// Get the changes trie root of the current storage overlay at a block with given `parent`. /// diff --git a/substrate/primitives/externalities/src/scope_limited.rs b/substrate/primitives/externalities/src/scope_limited.rs index 3b5013ba8e7febafefb0e7e51f23d19f3e99a93e..ab8be1f3fc81ec366d298bfcd84520a69d140998 100644 --- a/substrate/primitives/externalities/src/scope_limited.rs +++ b/substrate/primitives/externalities/src/scope_limited.rs @@ -25,7 +25,8 @@ environmental::environmental!(ext: trait Externalities); /// while executing the given closure [`with_externalities`] grants access to them. The externalities /// are only set for the same thread this function was called from.
pub fn set_and_run_with_externalities<F, R>(ext: &mut dyn Externalities, f: F) -> R - where F: FnOnce() -> R +where + F: FnOnce() -> R, { ext::using(ext, f) } diff --git a/substrate/primitives/finality-grandpa/src/lib.rs b/substrate/primitives/finality-grandpa/src/lib.rs index 5b393bd1d80e6f2a222cec6ba5e530ac701611f4..a083796d659c8e1b30f6cd3a3447af9af2d6182a 100644 --- a/substrate/primitives/finality-grandpa/src/lib.rs +++ b/substrate/primitives/finality-grandpa/src/lib.rs @@ -25,12 +25,11 @@ extern crate alloc; #[cfg(feature = "std")] use serde::Serialize; -use codec::{Encode, Decode, Input, Codec}; -use sp_runtime::{ConsensusEngineId, RuntimeDebug, traits::NumberFor}; -use sp_std::borrow::Cow; -use sp_std::vec::Vec; +use codec::{Codec, Decode, Encode, Input}; #[cfg(feature = "std")] -use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; +use sp_runtime::{traits::NumberFor, ConsensusEngineId, RuntimeDebug}; +use sp_std::{borrow::Cow, vec::Vec}; #[cfg(feature = "std")] use log::debug; @@ -39,7 +38,7 @@ use log::debug; pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::GRANDPA; mod app { - use sp_application_crypto::{app_crypto, key_types::GRANDPA, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::GRANDPA}; app_crypto!(ed25519, GRANDPA); } @@ -181,10 +180,7 @@ impl<H, N> EquivocationProof<H, N> { /// Create a new `EquivocationProof` for the given set id and using the /// given equivocation as proof. pub fn new(set_id: SetId, equivocation: Equivocation<H, N>) -> Self { - EquivocationProof { - set_id, - equivocation, - } + EquivocationProof { set_id, equivocation } } /// Returns the set id at which the equivocation occurred. @@ -277,7 +273,7 @@ where if $equivocation.first.0.target_hash == $equivocation.second.0.target_hash && $equivocation.first.0.target_number == $equivocation.second.0.target_number { - return false; + return false } // check signatures on both votes are valid @@ -297,17 +293,17 @@ where report.set_id, ); - return valid_first && valid_second; + return valid_first && valid_second }; } match report.equivocation { Equivocation::Prevote(equivocation) => { check!(equivocation, grandpa::Message::Prevote); - } + }, Equivocation::Precommit(equivocation) => { check!(equivocation, grandpa::Message::Precommit); - } + }, } } @@ -390,8 +386,8 @@ where H: Encode, N: Encode, { - use sp_core::crypto::Public; use sp_application_crypto::AppKey; + use sp_core::crypto::Public; use sp_std::convert::TryInto; let encoded = localized_payload(round, set_id, &message); @@ -400,13 +396,13 @@ where AuthorityId::ID, &public.to_public_crypto_pair(), &encoded[..], - ).ok().flatten()?.try_into().ok()?; + ) + .ok() + .flatten()? + .try_into() + .ok()?; - Some(grandpa::SignedMessage { - message, - signature, - id: public, - }) + Some(grandpa::SignedMessage { message, signature, id: public }) } /// WASM function call to check for pending changes.
@@ -457,7 +453,7 @@ impl<'a> Decode for VersionedAuthorityList<'a> { fn decode(value: &mut I) -> Result { let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()); + return Err("unknown Grandpa authorities version".into()) } Ok(authorities.into()) } diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs index 38639c5de32272e2b0cff20e54688d594872ac1b..18877cae5f3437ecf426e8ba097e66b36994a12a 100644 --- a/substrate/primitives/inherents/src/client_side.rs +++ b/substrate/primitives/inherents/src/client_side.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{InherentData, Error, InherentIdentifier}; +use crate::{Error, InherentData, InherentIdentifier}; use sp_runtime::traits::Block as BlockT; /// Something that can create inherent data providers. @@ -44,7 +44,9 @@ impl CreateInherentDataProviders Fut + Sync + Send, - Fut: std::future::Future>> + Send + 'static, + Fut: std::future::Future>> + + Send + + 'static, IDP: InherentDataProvider + 'static, ExtraArgs: Send + 'static, { diff --git a/substrate/primitives/inherents/src/lib.rs b/substrate/primitives/inherents/src/lib.rs index f0b5fdc940a92a140dfab07b4f8be4526080e7f8..922d5d19432724a786b06b3c918ce1c7446f9ebd 100644 --- a/substrate/primitives/inherents/src/lib.rs +++ b/substrate/primitives/inherents/src/lib.rs @@ -140,7 +140,7 @@ //! let block_production = if is_validator { //! // For block production we want to provide our inherent data provider //! cool_consensus_block_production(|_parent, ()| async { -//! Ok(InherentDataProvider) +//! Ok(InherentDataProvider) //! }).boxed() //! } else { //! futures::future::pending().boxed() @@ -162,9 +162,12 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -use sp_std::{collections::btree_map::{BTreeMap, IntoIter, Entry}, vec::Vec}; +use sp_std::{ + collections::btree_map::{BTreeMap, Entry, IntoIter}, + vec::Vec, +}; #[cfg(feature = "std")] mod client_side; @@ -204,7 +207,7 @@ pub type InherentIdentifier = [u8; 8]; #[derive(Clone, Default, Encode, Decode)] pub struct InherentData { /// All inherent data encoded with parity-scale-codec and an identifier. - data: BTreeMap> + data: BTreeMap>, } impl InherentData { @@ -231,20 +234,14 @@ impl InherentData { entry.insert(inherent.encode()); Ok(()) }, - Entry::Occupied(_) => { - Err(Error::InherentDataExists(identifier)) - } + Entry::Occupied(_) => Err(Error::InherentDataExists(identifier)), } } /// Replace the data for an inherent. /// /// If it does not exist, the data is just inserted. 
- pub fn replace_data( - &mut self, - identifier: InherentIdentifier, - inherent: &I, - ) { + pub fn replace_data(&mut self, identifier: InherentIdentifier, inherent: &I) { self.data.insert(identifier, inherent.encode()); } @@ -260,11 +257,10 @@ impl InherentData { identifier: &InherentIdentifier, ) -> Result, Error> { match self.data.get(identifier) { - Some(inherent) => - I::decode(&mut &inherent[..]) - .map_err(|e| Error::DecodingFailed(e, *identifier)) - .map(Some), - None => Ok(None) + Some(inherent) => I::decode(&mut &inherent[..]) + .map_err(|e| Error::DecodingFailed(e, *identifier)) + .map(Some), + None => Ok(None), } } @@ -292,11 +288,7 @@ pub struct CheckInherentsResult { impl Default for CheckInherentsResult { fn default() -> Self { - Self { - okay: true, - errors: InherentData::new(), - fatal_error: false, - } + Self { okay: true, errors: InherentData::new(), fatal_error: false } } } @@ -370,8 +362,8 @@ impl CheckInherentsResult { impl PartialEq for CheckInherentsResult { fn eq(&self, other: &Self) -> bool { self.fatal_error == other.fatal_error && - self.okay == other.okay && - self.errors.data == other.errors.data + self.okay == other.okay && + self.errors.data == other.errors.data } } @@ -407,7 +399,7 @@ impl IsFatalError for MakeFatalError { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; @@ -470,10 +462,7 @@ mod tests { let inherent_data = provider.create_inherent_data().unwrap(); - assert_eq!( - inherent_data.get_data::(&TEST_INHERENT_0).unwrap().unwrap(), - 42u32, - ); + assert_eq!(inherent_data.get_data::(&TEST_INHERENT_0).unwrap().unwrap(), 42u32,); } #[test] diff --git a/substrate/primitives/io/src/batch_verifier.rs b/substrate/primitives/io/src/batch_verifier.rs index 341df36c556492a998359bfcdc4503db54e39a10..b6da1d85907bdfa317b1f221f14e797166dfe820 100644 --- a/substrate/primitives/io/src/batch_verifier.rs +++ b/substrate/primitives/io/src/batch_verifier.rs @@ -17,9 +17,12 @@ //! Batch/parallel verification. 
-use sp_core::{ed25519, sr25519, ecdsa, crypto::Pair, traits::SpawnNamed}; -use std::sync::{Arc, atomic::{AtomicBool, Ordering as AtomicOrdering}}; -use futures::{future::FutureExt, channel::oneshot}; +use futures::{channel::oneshot, future::FutureExt}; +use sp_core::{crypto::Pair, ecdsa, ed25519, sr25519, traits::SpawnNamed}; +use std::sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, +}; #[derive(Debug, Clone)] struct Sr25519BatchItem { @@ -61,7 +64,9 @@ impl BatchVerifier { name: &'static str, ) -> bool { // there is already invalid transaction encountered - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } + if self.invalid.load(AtomicOrdering::Relaxed) { + return false + } let invalid_clone = self.invalid.clone(); let (sender, receiver) = oneshot::channel(); @@ -78,7 +83,8 @@ impl BatchVerifier { log::warn!("Verification halted while result was pending"); invalid_clone.store(true, AtomicOrdering::Relaxed); } - }.boxed(), + } + .boxed(), ); true @@ -110,7 +116,9 @@ impl BatchVerifier { pub_key: sr25519::Public, message: Vec, ) -> bool { - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } + if self.invalid.load(AtomicOrdering::Relaxed) { + return false + } self.sr25519_items.push(Sr25519BatchItem { signature, pub_key, message }); if self.sr25519_items.len() >= 128 { @@ -163,7 +171,7 @@ impl BatchVerifier { ); if !Self::verify_sr25519_batch(std::mem::take(&mut self.sr25519_items)) { - return false; + return false } if pending.len() > 0 { @@ -172,10 +180,12 @@ impl BatchVerifier { "substrate_batch_verify_join", async move { futures::future::join_all(pending).await; - sender.send(()) - .expect("Channel never panics if receiver is live. \ - Receiver is always live until received this data; qed. "); - }.boxed(), + sender.send(()).expect( + "Channel never panics if receiver is live. \ + Receiver is always live until received this data; qed. ", + ); + } + .boxed(), ); if receiver.recv().is_err() { @@ -184,7 +194,7 @@ impl BatchVerifier { "Haven't received async result from verification task. Returning false.", ); - return false; + return false } } diff --git a/substrate/primitives/io/src/lib.rs b/substrate/primitives/io/src/lib.rs index 6fb25df3d02a5fccab271461ac0c87973db73d98..d1aa9c489491ec46fa95d8f9cbb2d67e22a22ae2 100644 --- a/substrate/primitives/io/src/lib.rs +++ b/substrate/primitives/io/src/lib.rs @@ -18,14 +18,16 @@ //! I/O host interface for substrate runtime. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." 
+)] use sp_std::vec::Vec; @@ -35,31 +37,35 @@ use tracing; #[cfg(feature = "std")] use sp_core::{ crypto::Pair, - traits::{TaskExecutorExt, RuntimeSpawnExt}, - offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, hexdisplay::HexDisplay, + offchain::{OffchainDbExt, OffchainWorkerExt, TransactionPoolExt}, storage::ChildInfo, + traits::{RuntimeSpawnExt, TaskExecutorExt}, }; #[cfg(feature = "std")] use sp_keystore::{KeystoreExt, SyncCryptoStore}; use sp_core::{ - OpaquePeerId, crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, LogLevelFilter, + crypto::KeyTypeId, + ecdsa, ed25519, offchain::{ - Timestamp, HttpRequestId, HttpRequestStatus, HttpError, StorageKind, OpaqueNetworkState, + HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp, }, + sr25519, LogLevel, LogLevelFilter, OpaquePeerId, H256, }; #[cfg(feature = "std")] -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; -use sp_runtime_interface::{runtime_interface, Pointer}; -use sp_runtime_interface::pass_by::{PassBy, PassByCodec}; +use sp_runtime_interface::{ + pass_by::{PassBy, PassByCodec}, + runtime_interface, Pointer, +}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use sp_externalities::{ExternalitiesExt, Externalities}; +use sp_externalities::{Externalities, ExternalitiesExt}; #[cfg(feature = "std")] mod batch_verifier; @@ -167,7 +173,6 @@ pub trait Storage { } } - /// Append the encoded `value` to the storage item at `key`. /// /// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend). @@ -255,11 +260,7 @@ pub trait DefaultChildStorage { /// /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. /// Result is `None` if the value for `key` in the child storage can not be found. - fn get( - &self, - storage_key: &[u8], - key: &[u8], - ) -> Option> { + fn get(&self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.child_storage(&child_info, key).map(|s| s.to_vec()) } @@ -279,25 +280,19 @@ pub trait DefaultChildStorage { value_offset: u32, ) -> Option { let child_info = ChildInfo::new_default(storage_key); - self.child_storage(&child_info, key) - .map(|value| { - let value_offset = value_offset as usize; - let data = &value[value_offset.min(value.len())..]; - let written = std::cmp::min(data.len(), value_out.len()); - value_out[..written].copy_from_slice(&data[..written]); - data.len() as u32 - }) + self.child_storage(&child_info, key).map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + data.len() as u32 + }) } /// Set a child storage value. /// /// Set `key` to `value` in the child storage denoted by `storage_key`. - fn set( - &mut self, - storage_key: &[u8], - key: &[u8], - value: &[u8], - ) { + fn set(&mut self, storage_key: &[u8], key: &[u8], value: &[u8]) { let child_info = ChildInfo::new_default(storage_key); self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } @@ -305,11 +300,7 @@ pub trait DefaultChildStorage { /// Clear a child storage key. /// /// For the default child storage at `storage_key`, clear value at `key`. 
- fn clear( - &mut self, - storage_key: &[u8], - key: &[u8], - ) { + fn clear(&mut self, storage_key: &[u8], key: &[u8]) { let child_info = ChildInfo::new_default(storage_key); self.clear_child_storage(&child_info, key); } @@ -318,10 +309,7 @@ pub trait DefaultChildStorage { /// /// If it exists, the child storage for `storage_key` /// is removed. - fn storage_kill( - &mut self, - storage_key: &[u8], - ) { + fn storage_kill(&mut self, storage_key: &[u8]) { let child_info = ChildInfo::new_default(storage_key); self.kill_child_storage(&child_info, None); } @@ -352,11 +340,7 @@ pub trait DefaultChildStorage { /// Check a child storage key. /// /// Check whether the given `key` exists in default child defined at `storage_key`. - fn exists( - &self, - storage_key: &[u8], - key: &[u8], - ) -> bool { + fn exists(&self, storage_key: &[u8], key: &[u8]) -> bool { let child_info = ChildInfo::new_default(storage_key); self.exists_child_storage(&child_info, key) } @@ -364,11 +348,7 @@ pub trait DefaultChildStorage { /// Clear child default key by prefix. /// /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. - fn clear_prefix( - &mut self, - storage_key: &[u8], - prefix: &[u8], - ) { + fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) { let child_info = ChildInfo::new_default(storage_key); let _ = self.clear_child_prefix(&child_info, prefix, None); } @@ -397,10 +377,7 @@ pub trait DefaultChildStorage { /// The hashing algorithm is defined by the `Block`. /// /// Returns a `Vec` that holds the SCALE encoded hash. - fn root( - &mut self, - storage_key: &[u8], - ) -> Vec { + fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); self.child_storage_root(&child_info) } @@ -408,11 +385,7 @@ pub trait DefaultChildStorage { /// Child storage key iteration. /// /// Get the next key in storage after the given one in lexicographic order in child storage. - fn next_key( - &mut self, - storage_key: &[u8], - key: &[u8], - ) -> Option> { + fn next_key(&mut self, storage_key: &[u8], key: &[u8]) -> Option> { let child_info = ChildInfo::new_default(storage_key); self.next_child_storage_key(&child_info, key) } @@ -447,7 +420,8 @@ pub trait Trie { &root, proof, &[(key, Some(value))], - ).is_ok() + ) + .is_ok() } /// Verify trie proof @@ -456,7 +430,8 @@ pub trait Trie { &root, proof, &[(key, Some(value))], - ).is_ok() + ) + .is_ok() } } @@ -516,7 +491,7 @@ pub trait Misc { err, ); None - } + }, } } } @@ -526,7 +501,8 @@ pub trait Misc { pub trait Crypto { /// Returns all `ed25519` public keys for the given key id from the keystore. fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ed25519_public_keys(keystore, id) } @@ -539,7 +515,8 @@ pub trait Crypto { /// Returns the public key. 
fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> ed25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ed25519_generate_new(keystore, id, seed) .expect("`ed25519_generate` failed") @@ -555,7 +532,8 @@ pub trait Crypto { pub_key: &ed25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) .ok() @@ -566,11 +544,7 @@ pub trait Crypto { /// Verify `ed25519` signature. /// /// Returns `true` when the verification was successful. - fn ed25519_verify( - sig: &ed25519::Signature, - msg: &[u8], - pub_key: &ed25519::Public, - ) -> bool { + fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool { ed25519::Pair::verify(sig, msg, pub_key) } @@ -588,20 +562,16 @@ pub trait Crypto { msg: &[u8], pub_key: &ed25519::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| ed25519_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| ed25519_verify(sig, msg, pub_key)) } /// Verify `sr25519` signature. /// /// Returns `true` when the verification was successful. #[version(2)] - fn sr25519_verify( - sig: &sr25519::Signature, - msg: &[u8], - pub_key: &sr25519::Public, - ) -> bool { + fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool { sr25519::Pair::verify(sig, msg, pub_key) } @@ -619,14 +589,15 @@ pub trait Crypto { msg: &[u8], pub_key: &sr25519::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| sr25519_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| sr25519_verify(sig, msg, pub_key)) } /// Start verification extension. fn start_batch_verify(&mut self) { - let scheduler = self.extension::() + let scheduler = self + .extension::() .expect("No task executor associated with the current context!") .clone(); @@ -641,7 +612,8 @@ pub trait Crypto { /// /// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called). fn finish_batch_verify(&mut self) -> bool { - let result = self.extension::() + let result = self + .extension::() .expect("`finish_batch_verify` should only be called after `start_batch_verify`") .verify_and_clear(); @@ -653,7 +625,8 @@ pub trait Crypto { /// Returns all `sr25519` public keys for the given key id from the keystore. fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &*** self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sr25519_public_keys(keystore, id) } @@ -666,7 +639,8 @@ pub trait Crypto { /// Returns the public key. 
fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> sr25519::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sr25519_generate_new(keystore, id, seed) .expect("`sr25519_generate` failed") @@ -682,7 +656,8 @@ pub trait Crypto { pub_key: &sr25519::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) .ok() @@ -700,7 +675,8 @@ pub trait Crypto { /// Returns all `ecdsa` public keys for the given key id from the keystore. fn ecdsa_public_keys(&mut self, id: KeyTypeId) -> Vec { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::ecdsa_public_keys(keystore, id) } @@ -713,10 +689,10 @@ pub trait Crypto { /// Returns the public key. fn ecdsa_generate(&mut self, id: KeyTypeId, seed: Option>) -> ecdsa::Public { let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); - SyncCryptoStore::ecdsa_generate_new(keystore, id, seed) - .expect("`ecdsa_generate` failed") + SyncCryptoStore::ecdsa_generate_new(keystore, id, seed).expect("`ecdsa_generate` failed") } /// Sign the given `msg` with the `ecdsa` key that corresponds to the given public key and @@ -729,7 +705,8 @@ pub trait Crypto { pub_key: &ecdsa::Public, msg: &[u8], ) -> Option { - let keystore = &***self.extension::() + let keystore = &***self + .extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) .ok() @@ -740,11 +717,7 @@ pub trait Crypto { /// Verify `ecdsa` signature. /// /// Returns `true` when the verification was successful. - fn ecdsa_verify( - sig: &ecdsa::Signature, - msg: &[u8], - pub_key: &ecdsa::Public, - ) -> bool { + fn ecdsa_verify(sig: &ecdsa::Signature, msg: &[u8], pub_key: &ecdsa::Public) -> bool { ecdsa::Pair::verify(sig, msg, pub_key) } @@ -762,9 +735,9 @@ pub trait Crypto { msg: &[u8], pub_key: &ecdsa::Public, ) -> bool { - self.extension::().map( - |extension| extension.push_ecdsa(sig.clone(), pub_key.clone(), msg.to_vec()) - ).unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key)) + self.extension::() + .map(|extension| extension.push_ecdsa(sig.clone(), pub_key.clone(), msg.to_vec())) + .unwrap_or_else(|| ecdsa_verify(sig, msg, pub_key)) } /// Verify and recover a SECP256k1 ECDSA signature. 
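// Sketch of the keystore-backed `ecdsa` host functions above. They panic with
// "No `keystore` associated for the current context!" unless a `KeystoreExt`
// is registered; the in-memory test keystore from `sp-keystore` is used here.
use sp_core::testing::ECDSA;
use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr};
use sp_state_machine::BasicExternalities;
use std::sync::Arc;

fn main() {
    let keystore: SyncCryptoStorePtr = Arc::new(KeyStore::new());
    let mut ext = BasicExternalities::default();
    ext.register_extension(KeystoreExt(keystore));
    ext.execute_with(|| {
        let public = sp_io::crypto::ecdsa_generate(ECDSA, None);
        let sig = sp_io::crypto::ecdsa_sign(ECDSA, &public, b"msg")
            .expect("key was just generated in the keystore");
        assert!(sp_io::crypto::ecdsa_verify(&sig, b"msg", &public));
    });
}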
@@ -778,10 +751,11 @@ pub trait Crypto { sig: &[u8; 65], msg: &[u8; 32], ) -> Result<[u8; 64], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) - .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; + let rs = + secp256k1::Signature::parse_slice(&sig[0..64]).map_err(|_| EcdsaVerifyError::BadRS)?; + let v = + secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) + .map_err(|_| EcdsaVerifyError::BadV)?; let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) .map_err(|_| EcdsaVerifyError::BadSignature)?; let mut res = [0u8; 64]; @@ -799,10 +773,11 @@ pub trait Crypto { sig: &[u8; 65], msg: &[u8; 32], ) -> Result<[u8; 33], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) - .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; + let rs = + secp256k1::Signature::parse_slice(&sig[0..64]).map_err(|_| EcdsaVerifyError::BadRS)?; + let v = + secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) + .map_err(|_| EcdsaVerifyError::BadV)?; let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) .map_err(|_| EcdsaVerifyError::BadSignature)?; Ok(pubkey.serialize_compressed()) @@ -907,8 +882,10 @@ pub trait Offchain { /// The transaction will end up in the pool. fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { self.extension::() - .expect("submit_transaction can be called only in the offchain call context with - TransactionPool capabilities enabled") + .expect( + "submit_transaction can be called only in the offchain call context with + TransactionPool capabilities enabled", + ) .submit_transaction(data) } @@ -949,8 +926,10 @@ pub trait Offchain { /// offchain worker tasks running on the same machine. It IS persisted between runs. fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { self.extension::() - .expect("local_storage_set can be called only in the offchain call context with - OffchainDb extension") + .expect( + "local_storage_set can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_set(kind, key, value) } @@ -960,8 +939,10 @@ pub trait Offchain { /// offchain worker tasks running on the same machine. It IS persisted between runs. fn local_storage_clear(&mut self, kind: StorageKind, key: &[u8]) { self.extension::() - .expect("local_storage_clear can be called only in the offchain call context with - OffchainDb extension") + .expect( + "local_storage_clear can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_clear(kind, key) } @@ -982,14 +963,11 @@ pub trait Offchain { new_value: &[u8], ) -> bool { self.extension::() - .expect("local_storage_compare_and_set can be called only in the offchain call context - with OffchainDb extension") - .local_storage_compare_and_set( - kind, - key, - old_value.as_deref(), - new_value, + .expect( + "local_storage_compare_and_set can be called only in the offchain call context + with OffchainDb extension", ) + .local_storage_compare_and_set(kind, key, old_value.as_deref(), new_value) } /// Gets a value from the local storage. @@ -999,8 +977,10 @@ pub trait Offchain { /// offchain worker tasks running on the same machine. 
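// Sketch of `secp256k1_ecdsa_recover_compressed` above: sign a 32-byte hash
// with an `ecdsa` pair and recover the compressed public key from the 65-byte
// r||s||v signature. Assumes `ecdsa::Pair::sign_prehashed`, the counterpart of
// the keystore's `ecdsa_sign_prehashed` exercised later in this diff.
use sp_core::{ecdsa, keccak_256, Pair as _};

fn main() {
    let pair = ecdsa::Pair::from_string("//Alice", None).unwrap();
    let msg = keccak_256(b"example payload");

    let sig = pair.sign_prehashed(&msg);

    // `sig.0[64]` is the recovery id; values 0/1 and 27/28 are both accepted.
    let recovered = sp_io::crypto::secp256k1_ecdsa_recover_compressed(&sig.0, &msg)
        .expect("a fresh signature must recover");
    assert_eq!(recovered, pair.public().0);
}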
It IS persisted between runs. fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option<Vec<u8>> { self.extension::<OffchainDbExt>() - .expect("local_storage_get can be called only in the offchain call context with - OffchainDb extension") + .expect( + "local_storage_get can be called only in the offchain call context with + OffchainDb extension", + ) .local_storage_get(kind, key) } @@ -1128,12 +1108,7 @@ pub trait Logging { /// Instead of using directly, prefer setting up `RuntimeLogger` and using `log` macros. fn log(level: LogLevel, target: &str, message: &[u8]) { if let Ok(message) = std::str::from_utf8(message) { - log::log!( - target: target, - log::Level::from(level), - "{}", - message, - ) + log::log!(target: target, log::Level::from(level), "{}", message,) } } @@ -1153,7 +1128,6 @@ impl<T: Encode + Decode> PassBy for Crossing<T> { } impl<T: Encode + Decode> Crossing<T> { - /// Convert into the inner type pub fn into_inner(self) -> T { self.0 @@ -1162,12 +1136,12 @@ impl<T: Encode + Decode> Crossing<T> { // useful for testing impl<T> core::default::Default for Crossing<T> - where T: core::default::Default + Encode + Decode +where + T: core::default::Default + Encode + Decode, { fn default() -> Self { Self(Default::default()) } - } /// Interface to provide tracing facilities for wasm. Modelled after tokio's `tracing` crate /// interfaces. See `sp-tracing` for more information. @@ -1184,9 +1158,7 @@ pub trait WasmTracing { /// choose to cache the result for the execution of the entire block. fn enabled(&mut self, metadata: Crossing) -> bool { let metadata: &tracing_core::metadata::Metadata<'static> = (&metadata.into_inner()).into(); - tracing::dispatcher::get_default(|d| { - d.enabled(metadata) - }) + tracing::dispatcher::get_default(|d| d.enabled(metadata)) } /// Open a new span with the given attributes. Return the u64 Id of the span. @@ -1205,9 +1177,7 @@ pub trait WasmTracing { d.enter(&final_id); final_id.into_u64() }), - _ => { - 0 - } + _ => 0, } } @@ -1226,19 +1196,18 @@ pub trait WasmTracing { } } -#[cfg(all(not(feature="std"), feature="with-tracing"))] +#[cfg(all(not(feature = "std"), feature = "with-tracing"))] mod tracing_setup { + use super::{wasm_tracing, Crossing}; use core::sync::atomic::{AtomicBool, Ordering}; use tracing_core::{ - dispatcher::{Dispatch, set_global_default}, - span::{Id, Record, Attributes}, - Metadata, Event, + dispatcher::{set_global_default, Dispatch}, + span::{Attributes, Id, Record}, + Event, Metadata, }; - use super::{wasm_tracing, Crossing}; static TRACING_SET: AtomicBool = AtomicBool::new(false); - /// The PassingTracingSubscriber implements `tracing_core::Subscriber` /// and pushes the information across the runtime interface to the host struct PassingTracingSubsciber; @@ -1256,12 +1225,12 @@ mod tracing_setup { /// Not implemented! We do not support recording values later /// Will panic when used. fn record(&self, span: &Id, values: &Record<'_>) { - unimplemented!{} // this usage is not supported + unimplemented! {} // this usage is not supported } /// Not implemented! We do not support recording values later /// Will panic when used. fn record_follows_from(&self, span: &Id, follows: &Id) { - unimplemented!{ } // this usage is not supported + unimplemented! {} // this usage is not supported } fn event(&self, event: &Event<'_>) { wasm_tracing::event(Crossing(event.into())) } @@ -1271,7 +1240,6 @@ mod tracing_setup { } } - /// Initialize tracing of sp_tracing on wasm with `with-tracing` enabled. /// Can be called multiple times from within the same process and will only /// set the global bridging subscriber once.
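// Sketch of the offchain local-storage API above, including the
// compare-and-set helper. Runtime-side calls need an offchain DB extension;
// the `TestOffchainExt`/`OffchainDbExt` pairing here is an assumption based on
// `sp_core::offchain::testing`.
use sp_core::offchain::{testing::TestOffchainExt, OffchainDbExt, StorageKind};
use sp_state_machine::BasicExternalities;

fn main() {
    let (offchain, _state) = TestOffchainExt::new();
    let mut ext = BasicExternalities::default();
    ext.register_extension(OffchainDbExt::new(offchain));
    ext.execute_with(|| {
        let key = b"counter";
        sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, key, b"1");

        // Atomic update: replaces the value only while it still equals `old`.
        let swapped = sp_io::offchain::local_storage_compare_and_set(
            StorageKind::PERSISTENT,
            key,
            Some(b"1".to_vec()),
            b"2",
        );
        assert!(swapped);
        assert_eq!(
            sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, key),
            Some(b"2".to_vec()),
        );
    });
}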
@@ -1284,11 +1252,11 @@ mod tracing_setup { } } -#[cfg(not(all(not(feature="std"), feature="with-tracing")))] +#[cfg(not(all(not(feature = "std"), feature = "with-tracing")))] mod tracing_setup { /// Initialize tracing of sp_tracing not necessary – noop. To enable build /// without std and with the `with-tracing`-feature. - pub fn init_tracing() { } + pub fn init_tracing() {} } pub use tracing_setup::init_tracing; @@ -1319,14 +1287,16 @@ pub trait Sandbox { return_val_len: u32, state_ptr: Pointer, ) -> u32 { - self.sandbox().invoke( - instance_idx, - &function, - &args, - return_val_ptr, - return_val_len, - state_ptr.into(), - ).expect("Failed to invoke function with sandbox") + self.sandbox() + .invoke( + instance_idx, + &function, + &args, + return_val_ptr, + return_val_len, + state_ptr.into(), + ) + .expect("Failed to invoke function with sandbox") } /// Create a new memory instance with the given `initial` and `maximum` size. @@ -1364,20 +1334,30 @@ pub trait Sandbox { /// Teardown the memory instance with the given `memory_idx`. fn memory_teardown(&mut self, memory_idx: u32) { - self.sandbox().memory_teardown(memory_idx).expect("Failed to teardown memory with sandbox") + self.sandbox() + .memory_teardown(memory_idx) + .expect("Failed to teardown memory with sandbox") } /// Teardown the sandbox instance with the given `instance_idx`. fn instance_teardown(&mut self, instance_idx: u32) { - self.sandbox().instance_teardown(instance_idx).expect("Failed to teardown sandbox instance") + self.sandbox() + .instance_teardown(instance_idx) + .expect("Failed to teardown sandbox instance") } /// Get the value from a global with the given `name`. The sandbox is determined by the given /// `instance_idx`. /// /// Returns `Some(_)` when the requested global variable could be found. - fn get_global_val(&mut self, instance_idx: u32, name: &str) -> Option { - self.sandbox().get_global_val(instance_idx, name).expect("Failed to get global from sandbox") + fn get_global_val( + &mut self, + instance_idx: u32, + name: &str, + ) -> Option { + self.sandbox() + .get_global_val(instance_idx, name) + .expect("Failed to get global from sandbox") } } @@ -1390,11 +1370,13 @@ pub trait RuntimeTasks { /// /// This should not be used directly. Use `sp_tasks::spawn` instead. fn spawn(dispatcher_ref: u32, entry: u32, payload: Vec) -> u64 { - sp_externalities::with_externalities(|mut ext|{ - let runtime_spawn = ext.extension::() + sp_externalities::with_externalities(|mut ext| { + let runtime_spawn = ext + .extension::() .expect("Cannot spawn without dynamic runtime dispatcher (RuntimeSpawnExt)"); runtime_spawn.spawn_call(dispatcher_ref, entry, payload) - }).expect("`RuntimeTasks::spawn`: called outside of externalities context") + }) + .expect("`RuntimeTasks::spawn`: called outside of externalities context") } /// Wasm host function for joining a task. @@ -1402,12 +1384,14 @@ pub trait RuntimeTasks { /// This should not be used directly. Use `join` of `sp_tasks::spawn` result instead. fn join(handle: u64) -> Vec { sp_externalities::with_externalities(|mut ext| { - let runtime_spawn = ext.extension::() + let runtime_spawn = ext + .extension::() .expect("Cannot join without dynamic runtime dispatcher (RuntimeSpawnExt)"); runtime_spawn.join(handle) - }).expect("`RuntimeTasks::join`: called outside of externalities context") + }) + .expect("`RuntimeTasks::join`: called outside of externalities context") } - } +} /// Allocator used by Substrate when executing the Wasm runtime. 
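// The `RuntimeTasks` host functions above are the plumbing for `sp_tasks::spawn`,
// as their docs note. A hedged sketch of the intended high-level API, assuming
// the native implementation only needs a `TaskExecutorExt` in scope:
use sp_core::{testing::TaskExecutor, traits::TaskExecutorExt};
use sp_state_machine::BasicExternalities;

fn double_all(payload: Vec<u8>) -> Vec<u8> {
    payload.into_iter().map(|b| b.wrapping_mul(2)).collect()
}

fn main() {
    let mut ext = BasicExternalities::default();
    ext.register_extension(TaskExecutorExt::new(TaskExecutor::new()));
    ext.execute_with(|| {
        // `spawn` hands the payload to a second runtime instance; `join` blocks
        // until that task has produced its output.
        let handle = sp_tasks::spawn(double_all, vec![1, 2, 3]);
        assert_eq!(handle.join(), vec![2, 4, 6]);
    });
}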
#[cfg(not(feature = "std"))] @@ -1483,10 +1467,8 @@ pub type SubstrateHostFunctions = ( #[cfg(test)] mod tests { use super::*; + use sp_core::{map, storage::Storage, testing::TaskExecutor, traits::TaskExecutorExt}; use sp_state_machine::BasicExternalities; - use sp_core::{ - storage::Storage, map, traits::TaskExecutorExt, testing::TaskExecutor, - }; use std::any::TypeId; #[test] @@ -1542,7 +1524,10 @@ mod tests { }); t.execute_with(|| { - assert!(matches!(storage::clear_prefix(b":abc", None), KillStorageResult::AllRemoved(2))); + assert!(matches!( + storage::clear_prefix(b":abc", None), + KillStorageResult::AllRemoved(2) + )); assert!(storage::get(b":a").is_some()); assert!(storage::get(b":abdd").is_some()); @@ -1583,11 +1568,7 @@ mod tests { } // push invalid - crypto::sr25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::sr25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); crypto::start_batch_verify(); @@ -1607,11 +1588,7 @@ mod tests { ext.execute_with(|| { // invalid ed25519 signature crypto::start_batch_verify(); - crypto::ed25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::ed25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); // 2 valid ed25519 signatures @@ -1637,11 +1614,7 @@ mod tests { let signature = pair.sign(msg); crypto::ed25519_batch_verify(&signature, msg, &pair.public()); - crypto::ed25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::ed25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); @@ -1673,11 +1646,7 @@ mod tests { let signature = pair.sign(msg); crypto::sr25519_batch_verify(&signature, msg, &pair.public()); - crypto::sr25519_batch_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + crypto::sr25519_batch_verify(&Default::default(), &Vec::new(), &Default::default()); assert!(!crypto::finish_batch_verify()); }); diff --git a/substrate/primitives/keyring/src/ed25519.rs b/substrate/primitives/keyring/src/ed25519.rs index c9dd70d63d5c9e160cb2fba02bffd09208ea488c..65341a360579b446be9275a7da7a1ff8540fa9df 100644 --- a/substrate/primitives/keyring/src/ed25519.rs +++ b/substrate/primitives/keyring/src/ed25519.rs @@ -17,11 +17,14 @@ //! Support code for the runtime. A set of test accounts. -use std::{collections::HashMap, ops::Deref}; use lazy_static::lazy_static; -use sp_core::{ed25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::ed25519; +use sp_core::{ + ed25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -79,7 +82,7 @@ impl Keyring { } /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator<Item = Keyring> { + pub fn iter() -> impl Iterator<Item = Keyring> { <Self as strum::IntoEnumIterator>::iter() } @@ -114,13 +117,10 @@ impl From<Keyring> for sp_runtime::MultiSigner { } lazy_static!
{ - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + Keyring::iter().map(|i| (i, i.pair())).collect(); + static ref PUBLIC_KEYS: HashMap = + PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); } impl From for Public { @@ -185,26 +185,20 @@ mod tests { #[test] fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); } } diff --git a/substrate/primitives/keyring/src/sr25519.rs b/substrate/primitives/keyring/src/sr25519.rs index a4f43be07f07d07715dc38e7700fbfe795136642..6a7aa3635a43ad257863634f15c43b6d69dc1121 100644 --- a/substrate/primitives/keyring/src/sr25519.rs +++ b/substrate/primitives/keyring/src/sr25519.rs @@ -17,12 +17,14 @@ //! Support code for the runtime. A set of test accounts. -use std::collections::HashMap; -use std::ops::Deref; use lazy_static::lazy_static; -use sp_core::{sr25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::sr25519; +use sp_core::{ + sr25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] @@ -80,7 +82,7 @@ impl Keyring { } /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator { + pub fn iter() -> impl Iterator { ::iter() } @@ -135,19 +137,16 @@ impl std::str::FromStr for Keyring { "ferdie" => Ok(Keyring::Ferdie), "one" => Ok(Keyring::One), "two" => Ok(Keyring::Two), - _ => Err(ParseKeyringError) + _ => Err(ParseKeyringError), } } } lazy_static! 
{ - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + Keyring::iter().map(|i| (i, i.pair())).collect(); + static ref PUBLIC_KEYS: HashMap = + PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect(); } impl From for AccountId32 { @@ -212,26 +211,20 @@ mod tests { #[test] fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); } } diff --git a/substrate/primitives/keystore/src/lib.rs b/substrate/primitives/keystore/src/lib.rs index cccb390d34ba1a9e7b4b3ad96e0e91f8b8fae288..c45e8a6f5d2be14725a9bf359ac9137d0ee1ddef 100644 --- a/substrate/primitives/keystore/src/lib.rs +++ b/substrate/primitives/keystore/src/lib.rs @@ -19,30 +19,30 @@ pub mod testing; pub mod vrf; -use std::sync::Arc; +use crate::vrf::{VRFSignature, VRFTranscriptData}; use async_trait::async_trait; use futures::{executor::block_on, future::join_all}; use sp_core::{ - crypto::{KeyTypeId, CryptoTypePublicPair}, - ed25519, sr25519, ecdsa, + crypto::{CryptoTypePublicPair, KeyTypeId}, + ecdsa, ed25519, sr25519, }; -use crate::vrf::{VRFTranscriptData, VRFSignature}; +use std::sync::Arc; /// CryptoStore error #[derive(Debug, derive_more::Display)] pub enum Error { /// Public key type is not supported - #[display(fmt="Key not supported: {:?}", _0)] + #[display(fmt = "Key not supported: {:?}", _0)] KeyNotSupported(KeyTypeId), /// Validation error - #[display(fmt="Validation error: {}", _0)] + #[display(fmt = "Validation error: {}", _0)] ValidationError(String), /// Keystore unavailable - #[display(fmt="Keystore unavailable")] + #[display(fmt = "Keystore unavailable")] Unavailable, /// Programming errors - #[display(fmt="An unknown keystore error occurred: {}", _0)] - Other(String) + #[display(fmt = "An unknown keystore error occurred: {}", _0)] + Other(String), } /// Something that generates, stores and provides access to keys. @@ -91,12 +91,7 @@ pub trait CryptoStore: Send + Sync { /// Places it into the file system store. /// /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. 
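// Usage sketch for the `Keyring` helpers from the two keyring files above:
// deterministic test accounts with ready-made signing, mirroring the
// `should_work` tests in this diff.
use sp_core::{sr25519, Pair as _};
use sp_keyring::sr25519::Keyring;

fn main() {
    let msg = b"I am Alice!";
    let sig = Keyring::Alice.sign(msg);
    assert!(sr25519::Pair::verify(&sig, msg, &Keyring::Alice.public()));

    // Every test account can be enumerated, e.g. to endow them in a mock genesis.
    for who in Keyring::iter() {
        println!("{}: {:?}", who, who.public());
    }
}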
- async fn insert_unknown( - &self, - id: KeyTypeId, - suri: &str, - public: &[u8] - ) -> Result<(), ()>; + async fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()>; /// Find intersection between provided keys and supported keys /// @@ -105,7 +100,7 @@ pub trait CryptoStore: Send + Sync { async fn supported_keys( &self, id: KeyTypeId, - keys: Vec<CryptoTypePublicPair> + keys: Vec<CryptoTypePublicPair>, ) -> Result<Vec<CryptoTypePublicPair>, Error>; /// List all supported keys /// @@ -142,14 +137,14 @@ pub trait CryptoStore: Send + Sync { &self, id: KeyTypeId, keys: Vec<CryptoTypePublicPair>, - msg: &[u8] + msg: &[u8], ) -> Result<Option<(CryptoTypePublicPair, Vec<u8>)>, Error> { if keys.len() == 1 { - return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))); + return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))) } else { for k in self.supported_keys(id, keys).await? { if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { - return Ok(Some((k, sign))); + return Ok(Some((k, sign))) } } } @@ -170,8 +165,7 @@ pub trait CryptoStore: Send + Sync { keys: Vec<CryptoTypePublicPair>, msg: &[u8], ) -> Result<Vec<Result<Option<Vec<u8>>, Error>>, ()> { - let futs = keys.iter() - .map(|k| self.sign_with(id, k, msg)); + let futs = keys.iter().map(|k| self.sign_with(id, k, msg)); Ok(join_all(futs).await) } @@ -202,8 +196,8 @@ pub trait CryptoStore: Send + Sync { /// in turn, used for signing the provided pre-hashed message. /// /// The `msg` argument provided should be a hashed message for which an - /// ECDSA signature should be generated. - /// + /// ECDSA signature should be generated. + /// /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and /// `public` combination doesn't exist in the keystore. An `Err` will be /// returned if generating the signature itself failed. @@ -260,11 +254,8 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// If the given seed is `Some(_)`, the key pair will only be stored in memory. /// /// Returns the public key of the generated key pair. - fn ecdsa_generate_new( - &self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result<ecdsa::Public, Error>; + fn ecdsa_generate_new(&self, id: KeyTypeId, seed: Option<&str>) + -> Result<ecdsa::Public, Error>; /// Insert a new key. This doesn't require any knowledge of the crypto; but a public key must be /// manually provided. @@ -281,7 +272,7 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { fn supported_keys( &self, id: KeyTypeId, - keys: Vec<CryptoTypePublicPair> + keys: Vec<CryptoTypePublicPair>, ) -> Result<Vec<CryptoTypePublicPair>, Error>; /// List all supported keys @@ -321,16 +312,16 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { &self, id: KeyTypeId, keys: Vec<CryptoTypePublicPair>, - msg: &[u8] + msg: &[u8], ) -> Result<Option<(CryptoTypePublicPair, Vec<u8>)>, Error> { if keys.len() == 1 { return Ok( - SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)), + SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)) ) } else { for k in SyncCryptoStore::supported_keys(self, id, keys)? { if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok(Some((k, sign))); + return Ok(Some((k, sign))) } } } @@ -380,8 +371,8 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// in turn, used for signing the provided pre-hashed message. /// /// The `msg` argument provided should be a hashed message for which an - /// ECDSA signature should be generated. - /// + /// ECDSA signature should be generated. + /// /// Returns an [`ecdsa::Signature`] or `None` in case the given `id` and /// `public` combination doesn't exist in the keystore. An `Err` will be /// returned if generating the signature itself failed.
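// Sketch of `SyncCryptoStore::sign_with` from the trait above, run against the
// in-memory `KeyStore` defined in the next file; key-type constants and the
// `CryptoTypePublicPair` construction follow the tests in this diff.
use sp_core::{
    crypto::{CryptoTypePublicPair, Public as _},
    sr25519,
    testing::SR25519,
};
use sp_keystore::{testing::KeyStore, SyncCryptoStore};

fn main() {
    let store = KeyStore::new();
    let public = SyncCryptoStore::sr25519_generate_new(&store, SR25519, None).unwrap();

    let key = CryptoTypePublicPair(sr25519::CRYPTO_ID, public.to_raw_vec());
    // `Ok(None)` would mean the key is unknown; `Err(_)` a keystore failure.
    let sig = SyncCryptoStore::sign_with(&store, SR25519, &key, b"msg")
        .expect("keystore is usable")
        .expect("key was just generated");

    // The returned bytes are the SCALE-encoded signature.
    assert!(!sig.is_empty());
}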
diff --git a/substrate/primitives/keystore/src/testing.rs b/substrate/primitives/keystore/src/testing.rs index 9cc8b8fc64b110778df2a25216720a3afb9bb0bd..718ba798dc0f3ddd40df36ce2792ac07680ec06b 100644 --- a/substrate/primitives/keystore/src/testing.rs +++ b/substrate/primitives/keystore/src/testing.rs @@ -17,19 +17,21 @@ //! Types that should only be used for testing! -use sp_core::crypto::KeyTypeId; use sp_core::{ - crypto::{Pair, Public, CryptoTypePublicPair}, - ed25519, sr25519, ecdsa, + crypto::{CryptoTypePublicPair, KeyTypeId, Pair, Public}, + ecdsa, ed25519, sr25519, }; use crate::{ - {CryptoStore, SyncCryptoStorePtr, Error, SyncCryptoStore}, - vrf::{VRFTranscriptData, VRFSignature, make_transcript}, + vrf::{make_transcript, VRFSignature, VRFTranscriptData}, + CryptoStore, Error, SyncCryptoStore, SyncCryptoStorePtr, }; -use std::{collections::{HashMap, HashSet}, sync::Arc}; -use parking_lot::RwLock; use async_trait::async_trait; +use parking_lot::RwLock; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; /// A keystore implementation usable in tests. #[derive(Default)] @@ -45,29 +47,28 @@ impl KeyStore { } fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) + }) } fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) + }) } fn ecdsa_key_pair(&self, id: KeyTypeId, pub_key: &ecdsa::Public) -> Option { - self.keys.read().get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) - ) + self.keys.read().get(&id).and_then(|inner| { + inner + .get(pub_key.as_slice()) + .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) + }) } - } #[async_trait] @@ -158,28 +159,32 @@ impl CryptoStore for KeyStore { impl SyncCryptoStore for KeyStore { fn keys(&self, id: KeyTypeId) -> Result, Error> { - self.keys.read() + self.keys + .read() .get(&id) .map(|map| { - Ok(map.keys() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k.clone())); - v - })) + Ok(map.keys().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ecdsa::CRYPTO_ID, k.clone())); + v + })) }) .unwrap_or_else(|| Ok(vec![])) } fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) + .map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + 
}) .unwrap_or_default() } @@ -190,27 +195,40 @@ impl SyncCryptoStore for KeyStore { ) -> Result { match seed { Some(seed) => { - let pair = sr25519::Pair::from_string(seed, None) - .map_err(|_| Error::ValidationError("Generates an `sr25519` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + let pair = sr25519::Pair::from_string(seed, None).map_err(|_| { + Error::ValidationError("Generates an `sr25519` pair.".to_owned()) + })?; + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = sr25519::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) + .map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + }) .unwrap_or_default() } @@ -221,27 +239,40 @@ impl SyncCryptoStore for KeyStore { ) -> Result { match seed { Some(seed) => { - let pair = ed25519::Pair::from_string(seed, None) - .map_err(|_| Error::ValidationError("Generates an `ed25519` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + let pair = ed25519::Pair::from_string(seed, None).map_err(|_| { + Error::ValidationError("Generates an `ed25519` pair.".to_owned()) + })?; + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = ed25519::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn ecdsa_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.read().get(&id) - .map(|keys| + self.keys + .read() + .get(&id) + .map(|keys| { keys.values() - .map(|s| ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid")) + .map(|s| { + ecdsa::Pair::from_string(s, None).expect("`ecdsa` seed slice is valid") + }) .map(|p| p.public()) .collect() - ) + }) .unwrap_or_default() } @@ -254,24 +285,38 @@ impl SyncCryptoStore for KeyStore { Some(seed) => { let pair = ecdsa::Pair::from_string(seed, None) .map_err(|_| Error::ValidationError("Generates an `ecdsa` pair.".to_owned()))?; - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, None => { let (pair, phrase, _) = ecdsa::Pair::generate_with_phrase(None); - self.keys.write().entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); + self.keys + .write() + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); Ok(pair.public()) - } + }, } } fn insert_unknown(&self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { - self.keys.write().entry(id).or_default().insert(public.to_owned(), suri.to_string()); + self.keys + .write() + .entry(id) + .or_default() + 
.insert(public.to_owned(), suri.to_string()); Ok(()) } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(k, t)| self.keys.read().get(&t).and_then(|s| s.get(k)).is_some()) + public_keys + .iter() + .all(|(k, t)| self.keys.read().get(&t).and_then(|s| s.get(k)).is_some()) } fn supported_keys( @@ -295,24 +340,24 @@ impl SyncCryptoStore for KeyStore { match key.0 { ed25519::CRYPTO_ID => { - let key_pair = self - .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())); + let key_pair = + self.ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())); key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - } + }, sr25519::CRYPTO_ID => { - let key_pair = self - .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())); + let key_pair = + self.sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())); key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - } + }, ecdsa::CRYPTO_ID => { - let key_pair = self - .ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())); + let key_pair = + self.ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())); key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() - } - _ => Err(Error::KeyNotSupported(id)) + }, + _ => Err(Error::KeyNotSupported(id)), } } @@ -323,17 +368,11 @@ impl SyncCryptoStore for KeyStore { transcript_data: VRFTranscriptData, ) -> Result, Error> { let transcript = make_transcript(transcript_data); - let pair = if let Some(k) = self.sr25519_key_pair(key_type, public) { - k - } else { - return Ok(None) - }; + let pair = + if let Some(k) = self.sr25519_key_pair(key_type, public) { k } else { return Ok(None) }; let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(Some(VRFSignature { - output: inout.to_output(), - proof, - })) + Ok(Some(VRFSignature { output: inout.to_output(), proof })) } fn ecdsa_sign_prehashed( @@ -362,15 +401,18 @@ impl Into> for KeyStore { #[cfg(test)] mod tests { use super::*; - use sp_core::{sr25519, testing::{ED25519, SR25519, ECDSA}}; - use crate::{SyncCryptoStore, vrf::VRFTranscriptValue}; + use crate::{vrf::VRFTranscriptValue, SyncCryptoStore}; + use sp_core::{ + sr25519, + testing::{ECDSA, ED25519, SR25519}, + }; #[test] fn store_key_and_extract() { let store = KeyStore::new(); - let public = SyncCryptoStore::ed25519_generate_new(&store, ED25519, None) - .expect("Generates key"); + let public = + SyncCryptoStore::ed25519_generate_new(&store, ED25519, None).expect("Generates key"); let public_keys = SyncCryptoStore::keys(&store, ED25519).unwrap(); @@ -384,12 +426,8 @@ mod tests { let secret_uri = "//Alice"; let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - SyncCryptoStore::insert_unknown( - &store, - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + SyncCryptoStore::insert_unknown(&store, SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); let public_keys = SyncCryptoStore::keys(&store, SR25519).unwrap(); @@ -409,7 +447,7 @@ mod tests { ("one", VRFTranscriptValue::U64(1)), ("two", VRFTranscriptValue::U64(2)), ("three", VRFTranscriptValue::Bytes("test".as_bytes().to_vec())), - ] + ], }; let result = SyncCryptoStore::sr25519_vrf_sign( @@ -420,19 +458,11 @@ mod tests { ); assert!(result.unwrap().is_none()); - SyncCryptoStore::insert_unknown( - &store, - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + SyncCryptoStore::insert_unknown(&store, 
SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); - let result = SyncCryptoStore::sr25519_vrf_sign( - &store, - SR25519, - &key_pair.public(), - transcript_data, - ); + let result = + SyncCryptoStore::sr25519_vrf_sign(&store, SR25519, &key_pair.public(), transcript_data); assert!(result.unwrap().is_some()); } @@ -445,16 +475,19 @@ mod tests { let pair = ecdsa::Pair::from_string(suri, None).unwrap(); let msg = sp_core::keccak_256(b"this should be a hashed message"); - + // no key in key store - let res = SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + let res = + SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); assert!(res.is_none()); // insert key, sign again - let res = SyncCryptoStore::insert_unknown(&store, ECDSA, suri, pair.public().as_ref()).unwrap(); + let res = + SyncCryptoStore::insert_unknown(&store, ECDSA, suri, pair.public().as_ref()).unwrap(); assert_eq!((), res); - let res = SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); - assert!(res.is_some()); + let res = + SyncCryptoStore::ecdsa_sign_prehashed(&store, ECDSA, &pair.public(), &msg).unwrap(); + assert!(res.is_some()); } } diff --git a/substrate/primitives/keystore/src/vrf.rs b/substrate/primitives/keystore/src/vrf.rs index 04286eea82761ca85b5ed0d4ae71b4019563960a..383abb77e17c7dcc21463e9fd6389c238ae5080d 100644 --- a/substrate/primitives/keystore/src/vrf.rs +++ b/substrate/primitives/keystore/src/vrf.rs @@ -59,21 +59,17 @@ pub fn make_transcript(data: VRFTranscriptData) -> Transcript { }, VRFTranscriptValue::U64(val) => { transcript.append_u64(label.as_bytes(), val); - } + }, } } transcript } - #[cfg(test)] mod tests { use super::*; use rand::RngCore; - use rand_chacha::{ - rand_core::SeedableRng, - ChaChaRng, - }; + use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; #[test] fn transcript_creation_matches() { @@ -90,9 +86,7 @@ mod tests { }); let test = |t: Transcript| -> [u8; 16] { let mut b = [0u8; 16]; - t.build_rng() - .finalize(&mut ChaChaRng::from_seed([0u8;32])) - .fill_bytes(&mut b); + t.build_rng().finalize(&mut ChaChaRng::from_seed([0u8; 32])).fill_bytes(&mut b); b }; debug_assert!(test(orig_transcript) == test(new_transcript)); diff --git a/substrate/primitives/maybe-compressed-blob/src/lib.rs b/substrate/primitives/maybe-compressed-blob/src/lib.rs index acd283e747f9fdb6d557b0419ea7d9f2e4f38f5a..4e4a3da0a82c6410f9a1e68cf46e833b10279e5e 100644 --- a/substrate/primitives/maybe-compressed-blob/src/lib.rs +++ b/substrate/primitives/maybe-compressed-blob/src/lib.rs @@ -18,8 +18,7 @@ //! Handling of blobs that may be compressed, based on an 8-byte magic identifier //! at the head. -use std::borrow::Cow; -use std::io::Read; +use std::{borrow::Cow, io::Read}; // An arbitrary prefix, that indicates a blob beginning with should be decompressed with // Zstd compression. 
@@ -52,7 +51,7 @@ impl std::fmt::Display for Error { } } -impl std::error::Error for Error { } +impl std::error::Error for Error {} fn read_from_decoder( decoder: impl Read, @@ -81,8 +80,8 @@ fn decompress_zstd(blob: &[u8], bomb_limit: usize) -> Result, Error> { #[cfg(target_os = "unknown")] fn decompress_zstd(mut blob: &[u8], bomb_limit: usize) -> Result, Error> { let blob_len = blob.len(); - let decoder = ruzstd::streaming_decoder::StreamingDecoder::new(&mut blob) - .map_err(|_| Error::Invalid)?; + let decoder = + ruzstd::streaming_decoder::StreamingDecoder::new(&mut blob).map_err(|_| Error::Invalid)?; read_from_decoder(decoder, blob_len, bomb_limit) } @@ -105,7 +104,7 @@ pub fn compress(blob: &[u8], bomb_limit: usize) -> Option> { use std::io::Write; if blob.len() > bomb_limit { - return None; + return None } let mut buf = ZSTD_PREFIX.to_vec(); diff --git a/substrate/primitives/npos-elections/benches/phragmen.rs b/substrate/primitives/npos-elections/benches/phragmen.rs index d48c246558844410458a3174254963d5bbfcbe7e..784825924935126b65e9efb18bc626f1cc72e72c 100644 --- a/substrate/primitives/npos-elections/benches/phragmen.rs +++ b/substrate/primitives/npos-elections/benches/phragmen.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. - //! Benchmarks of the phragmen election algorithm. //! Note that execution times will not be accurate in an absolute scale, since //! - Everything is executed in the context of `TestExternalities` @@ -27,13 +26,12 @@ use test::Bencher; use rand::{self, Rng}; use sp_npos_elections::{ElectionResult, VoteWeight}; -use std::collections::BTreeMap; -use sp_runtime::{Perbill, PerThing, traits::Zero}; use sp_npos_elections::{ - balance_solution, assignment_ratio_to_staked, to_support_map, to_without_backing, VoteWeight, - ExtendedBalance, Assignment, StakedAssignment, IdentifierT, assignment_ratio_to_staked, - seq_phragmen, + assignment_ratio_to_staked, balance_solution, seq_phragmen, to_support_map, to_without_backing, + Assignment, ExtendedBalance, IdentifierT, StakedAssignment, VoteWeight, }; +use sp_runtime::{traits::Zero, PerThing, Perbill}; +use std::collections::BTreeMap; // default params. Each will be scaled by the benchmarks individually. 
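// Round-trip sketch for the `sp-maybe-compressed-blob` helpers above:
// `compress` refuses blobs larger than the bomb limit (returns `None`) and
// `decompress` enforces the same limit on the inflated size.
fn main() {
    const BOMB_LIMIT: usize = 1024 * 1024;
    let blob = vec![42u8; 10_000];

    let compressed =
        sp_maybe_compressed_blob::compress(&blob, BOMB_LIMIT).expect("under the bomb limit");
    // The 8-byte zstd magic prefix mentioned above marks the output as compressed.
    let decompressed =
        sp_maybe_compressed_blob::decompress(&compressed, BOMB_LIMIT).expect("valid blob");
    assert_eq!(&*decompressed, &blob[..]);
}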
const VALIDATORS: u64 = 100; @@ -69,15 +67,13 @@ mod bench_closure_and_slice { ratio .into_iter() .zip(stakes.into_iter().map(|x| *x as ExtendedBalance)) - .map(|(a, stake)| { - a.into_staked(stake.into(), true) - }) + .map(|(a, stake)| a.into_staked(stake.into(), true)) .collect() } #[bench] fn closure(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); + let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; // each have one clone of assignments @@ -86,7 +82,7 @@ mod bench_closure_and_slice { #[bench] fn slice(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); + let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; b.iter(|| { @@ -112,20 +108,19 @@ fn do_phragmen( let mut candidates = Vec::with_capacity(num_validators as usize); let mut stake_of_tree: BTreeMap = BTreeMap::new(); - (1 ..= num_validators).for_each(|acc| { + (1..=num_validators).for_each(|acc| { candidates.push(acc); stake_of_tree.insert(acc, STAKE + rr(10, 1000)); }); let mut voters = Vec::with_capacity(num_nominators as usize); - (PREFIX ..= (PREFIX + num_nominators)).for_each(|acc| { + (PREFIX..=(PREFIX + num_nominators)).for_each(|acc| { // all possible targets let mut all_targets = candidates.clone(); // we remove and pop into `targets` `edge_per_voter` times. - let targets = (0 .. edge_per_voter).map(|_| { - all_targets.remove(rr(0, all_targets.len()) as usize) - }) - .collect::>(); + let targets = (0..edge_per_voter) + .map(|_| all_targets.remove(rr(0, all_targets.len()) as usize)) + .collect::>(); let stake = STAKE + rr(10, 1000); stake_of_tree.insert(acc, stake); @@ -138,20 +133,16 @@ fn do_phragmen( Zero::zero(), candidates.clone(), voters.clone(), - ).unwrap(); + ) + .unwrap(); - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; // Do the benchmarking with balancing. 
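// A condensed version of what `do_phragmen` above assembles: a tiny election
// run through `seq_phragmen`. Types mirror the fuzzers later in this diff
// (`AccountId = u64`, accuracy = `Perbill`); `None` skips balancing.
use sp_npos_elections::{seq_phragmen, ElectionResult};
use sp_runtime::Perbill;

fn main() {
    let candidates = vec![1u64, 2, 3];
    // (voter, vote weight, approved candidates)
    let voters = vec![
        (10u64, 1_000u64, vec![1u64, 2]),
        (20, 1_000, vec![2, 3]),
        (30, 1_000, vec![2]),
    ];

    let ElectionResult { winners, assignments } =
        seq_phragmen::<u64, Perbill>(2, candidates, voters, None).unwrap();

    assert_eq!(winners.len(), 2);
    println!("winners: {:?}, assignments: {:?}", winners, assignments);
}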
if eq_iters > 0 { let staked = assignment_ratio_to_staked(assignments, &stake_of); let winners = to_without_backing(winners); - let mut support = to_support_map( - winners.as_ref(), - staked.as_ref(), - ).unwrap(); + let mut support = to_support_map(winners.as_ref(), staked.as_ref()).unwrap(); balance_solution( staked.into_iter().map(|a| (a.clone(), stake_of(&a.who))).collect(), diff --git a/substrate/primitives/npos-elections/compact/src/assignment.rs b/substrate/primitives/npos-elections/compact/src/assignment.rs index 2c8edefbfb3797b85dd1ad3e1bf916c662ca1bc1..bd5b1bf0c154a332c73f8d19ceb2c8e55519d847 100644 --- a/substrate/primitives/npos-elections/compact/src/assignment.rs +++ b/substrate/primitives/npos-elections/compact/src/assignment.rs @@ -46,25 +46,29 @@ pub(crate) fn from_impl(count: usize) -> TokenStream2 { ),) }; - let from_impl_rest = (3..=count).map(|c| { - let inner = (0..c-1).map(|i| - quote!((index_of_target(&distribution[#i].0).or_invalid_index()?, distribution[#i].1),) - ).collect::(); - - let field_name = field_name_for(c); - let last_index = c - 1; - let last = quote!(index_of_target(&distribution[#last_index].0).or_invalid_index()?); - - quote!( - #c => compact.#field_name.push( - ( - index_of_voter(&who).or_invalid_index()?, - [#inner], - #last, + let from_impl_rest = (3..=count) + .map(|c| { + let inner = (0..c - 1) + .map( + |i| quote!((index_of_target(&distribution[#i].0).or_invalid_index()?, distribution[#i].1),), ) - ), - ) - }).collect::(); + .collect::(); + + let field_name = field_name_for(c); + let last_index = c - 1; + let last = quote!(index_of_target(&distribution[#last_index].0).or_invalid_index()?); + + quote!( + #c => compact.#field_name.push( + ( + index_of_voter(&who).or_invalid_index()?, + [#inner], + #last, + ) + ), + ) + }) + .collect::(); quote!( #from_impl_single @@ -113,39 +117,41 @@ pub(crate) fn into_impl(count: usize, per_thing: syn::Type) -> TokenStream2 { ) }; - let into_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - quote!( - for (voter_index, inners, t_last_idx) in self.#name { - let mut sum = #per_thing::zero(); - let mut inners_parsed = inners - .iter() - .map(|(ref t_idx, p)| { - sum = _npos::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); - let target = target_at(*t_idx).or_invalid_index()?; - Ok((target, *p)) - }) - .collect::, _npos::Error>>()?; - - if sum >= #per_thing::one() { - return Err(_npos::Error::CompactStakeOverflow); + let into_impl_rest = (3..=count) + .map(|c| { + let name = field_name_for(c); + quote!( + for (voter_index, inners, t_last_idx) in self.#name { + let mut sum = #per_thing::zero(); + let mut inners_parsed = inners + .iter() + .map(|(ref t_idx, p)| { + sum = _npos::sp_arithmetic::traits::Saturating::saturating_add(sum, *p); + let target = target_at(*t_idx).or_invalid_index()?; + Ok((target, *p)) + }) + .collect::, _npos::Error>>()?; + + if sum >= #per_thing::one() { + return Err(_npos::Error::CompactStakeOverflow); + } + + // defensive only. Since Percent doesn't have `Sub`. + let p_last = _npos::sp_arithmetic::traits::Saturating::saturating_sub( + #per_thing::one(), + sum, + ); + + inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); + + assignments.push(_npos::Assignment { + who: voter_at(voter_index).or_invalid_index()?, + distribution: inners_parsed, + }); } - - // defensive only. Since Percent doesn't have `Sub`. 
- let p_last = _npos::sp_arithmetic::traits::Saturating::saturating_sub( - #per_thing::one(), - sum, - ); - - inners_parsed.push((target_at(t_last_idx).or_invalid_index()?, p_last)); - - assignments.push(_npos::Assignment { - who: voter_at(voter_index).or_invalid_index()?, - distribution: inners_parsed, - }); - } - ) - }).collect::(); + ) + }) + .collect::(); quote!( #into_impl_single diff --git a/substrate/primitives/npos-elections/compact/src/codec.rs b/substrate/primitives/npos-elections/compact/src/codec.rs index f75f99682711c93852e9fc43cf84bdae9adb9ca9..6d59e11f041bbc16c4cd4dbd3fe3af14199c6ad4 100644 --- a/substrate/primitives/npos-elections/compact/src/codec.rs +++ b/substrate/primitives/npos-elections/compact/src/codec.rs @@ -80,39 +80,42 @@ fn decode_impl( } }; - let decode_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - - let inner_impl = (0..c-1).map(|i| - quote! { ( (inner[#i].0).0, (inner[#i].1).0 ), } - ).collect::(); - - quote! { - let #name = - < - _npos::sp_std::prelude::Vec<( - _npos::codec::Compact<#voter_type>, - [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], - _npos::codec::Compact<#target_type>, - )> - as _npos::codec::Decode - >::decode(value)?; - let #name = #name - .into_iter() - .map(|(v, inner, t_last)| ( - v.0, - [ #inner_impl ], - t_last.0, - )) - .collect::<_npos::sp_std::prelude::Vec<_>>(); - } - }).collect::(); - + let decode_impl_rest = (3..=count) + .map(|c| { + let name = field_name_for(c); + + let inner_impl = (0..c - 1) + .map(|i| quote! { ( (inner[#i].0).0, (inner[#i].1).0 ), }) + .collect::(); + + quote! { + let #name = + < + _npos::sp_std::prelude::Vec<( + _npos::codec::Compact<#voter_type>, + [(_npos::codec::Compact<#target_type>, _npos::codec::Compact<#weight_type>); #c-1], + _npos::codec::Compact<#target_type>, + )> + as _npos::codec::Decode + >::decode(value)?; + let #name = #name + .into_iter() + .map(|(v, inner, t_last)| ( + v.0, + [ #inner_impl ], + t_last.0, + )) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + } + }) + .collect::(); - let all_field_names = (1..=count).map(|c| { - let name = field_name_for(c); - quote! { #name, } - }).collect::(); + let all_field_names = (1..=count) + .map(|c| { + let name = field_name_for(c); + quote! { #name, } + }) + .collect::(); quote!( impl _npos::codec::Decode for #ident { @@ -165,29 +168,33 @@ fn encode_impl(ident: syn::Ident, count: usize) -> TokenStream2 { } }; - let encode_impl_rest = (3..=count).map(|c| { - let name = field_name_for(c); - - // we use the knowledge of the length to avoid copy_from_slice. - let inners_compact_array = (0..c-1).map(|i| - quote!{( - _npos::codec::Compact(inner[#i].0.clone()), - _npos::codec::Compact(inner[#i].1.clone()), - ),} - ).collect::(); - - quote! { - let #name = self.#name - .iter() - .map(|(v, inner, t_last)| ( - _npos::codec::Compact(v.clone()), - [ #inners_compact_array ], - _npos::codec::Compact(t_last.clone()), - )) - .collect::<_npos::sp_std::prelude::Vec<_>>(); - #name.encode_to(&mut r); - } - }).collect::(); + let encode_impl_rest = (3..=count) + .map(|c| { + let name = field_name_for(c); + + // we use the knowledge of the length to avoid copy_from_slice. + let inners_compact_array = (0..c - 1) + .map(|i| { + quote! {( + _npos::codec::Compact(inner[#i].0.clone()), + _npos::codec::Compact(inner[#i].1.clone()), + ),} + }) + .collect::(); + + quote! 
{ + let #name = self.#name + .iter() + .map(|(v, inner, t_last)| ( + _npos::codec::Compact(v.clone()), + [ #inners_compact_array ], + _npos::codec::Compact(t_last.clone()), + )) + .collect::<_npos::sp_std::prelude::Vec<_>>(); + #name.encode_to(&mut r); + } + }) + .collect::(); quote!( impl _npos::codec::Encode for #ident { diff --git a/substrate/primitives/npos-elections/compact/src/index_assignment.rs b/substrate/primitives/npos-elections/compact/src/index_assignment.rs index 6aeef1442236e84bc96e84390f54c613402c9dcb..347be7d19984f0c5b17ca7ff2f5370cccc66244c 100644 --- a/substrate/primitives/npos-elections/compact/src/index_assignment.rs +++ b/substrate/primitives/npos-elections/compact/src/index_assignment.rs @@ -65,7 +65,7 @@ pub(crate) fn from_impl(count: usize) -> TokenStream2 { ) ), ) - }) + }) .collect::(); quote!( diff --git a/substrate/primitives/npos-elections/compact/src/lib.rs b/substrate/primitives/npos-elections/compact/src/lib.rs index 0e9fbb34eea17d0518758e7571563862449989de..4bf8e8a4de403d1d65b822066629bce1e1cd2de2 100644 --- a/substrate/primitives/npos-elections/compact/src/lib.rs +++ b/substrate/primitives/npos-elections/compact/src/lib.rs @@ -18,7 +18,7 @@ //! Proc macro for a npos compact assignment. use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span, Ident}; +use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; use proc_macro_crate::{crate_name, FoundCrate}; use quote::quote; use syn::parse::{Parse, ParseStream, Result}; @@ -82,15 +82,8 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// ``` #[proc_macro] pub fn generate_solution_type(item: TokenStream) -> TokenStream { - let SolutionDef { - vis, - ident, - count, - voter_type, - target_type, - weight_type, - compact_encoding, - } = syn::parse_macro_input!(item as SolutionDef); + let SolutionDef { vis, ident, count, voter_type, target_type, weight_type, compact_encoding } = + syn::parse_macro_input!(item as SolutionDef); let imports = imports().unwrap_or_else(|e| e.to_compile_error()); @@ -102,7 +95,8 @@ pub fn generate_solution_type(item: TokenStream) -> TokenStream { target_type.clone(), weight_type.clone(), compact_encoding, - ).unwrap_or_else(|e| e.to_compile_error()); + ) + .unwrap_or_else(|e| e.to_compile_error()); quote!( #imports @@ -167,7 +161,7 @@ fn struct_def( weight_type.clone(), count, ); - quote!{ + quote! 
{ #compact_impl #[derive(Default, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] } @@ -321,23 +315,27 @@ fn remove_voter_impl(count: usize) -> TokenStream2 { } fn len_impl(count: usize) -> TokenStream2 { - (1..=count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_len = all_len.saturating_add(self.#field_name.len()); - ) - }).collect::() + (1..=count) + .map(|c| { + let field_name = field_name_for(c); + quote!( + all_len = all_len.saturating_add(self.#field_name.len()); + ) + }) + .collect::() } fn edge_count_impl(count: usize) -> TokenStream2 { - (1..=count).map(|c| { - let field_name = field_name_for(c); - quote!( - all_edges = all_edges.saturating_add( - self.#field_name.len().saturating_mul(#c as usize) - ); - ) - }).collect::() + (1..=count) + .map(|c| { + let field_name = field_name_for(c); + quote!( + all_edges = all_edges.saturating_add( + self.#field_name.len().saturating_mul(#c as usize) + ); + ) + }) + .collect::() } fn unique_targets_impl(count: usize) -> TokenStream2 { @@ -360,17 +358,19 @@ fn unique_targets_impl(count: usize) -> TokenStream2 { } }; - let unique_targets_impl_rest = (3..=count).map(|c| { - let field_name = field_name_for(c); - quote! { - self.#field_name.iter().for_each(|(_, inners, t_last)| { - inners.iter().for_each(|(t, _)| { - maybe_insert_target(*t); + let unique_targets_impl_rest = (3..=count) + .map(|c| { + let field_name = field_name_for(c); + quote! { + self.#field_name.iter().for_each(|(_, inners, t_last)| { + inners.iter().for_each(|(t, _)| { + maybe_insert_target(*t); + }); + maybe_insert_target(*t_last); }); - maybe_insert_target(*t_last); - }); - } - }).collect::(); + } + }) + .collect::(); quote! { #unique_targets_impl_single @@ -440,23 +440,29 @@ impl Parse for SolutionDef { let expected_types = ["VoterIndex", "TargetIndex", "Accuracy"]; - let mut types: Vec = generics.args.iter().zip(expected_types.iter()).map(|(t, expected)| - match t { + let mut types: Vec = generics + .args + .iter() + .zip(expected_types.iter()) + .map(|(t, expected)| match t { syn::GenericArgument::Type(ty) => { // this is now an error - Err(syn::Error::new_spanned(ty, format!("Expected binding: `{} = ...`", expected))) + Err(syn::Error::new_spanned( + ty, + format!("Expected binding: `{} = ...`", expected), + )) }, - syn::GenericArgument::Binding(syn::Binding{ident, ty, ..}) => { + syn::GenericArgument::Binding(syn::Binding { ident, ty, .. }) => { // check that we have the right keyword for this position in the argument list if ident == expected { Ok(ty.clone()) } else { Err(syn::Error::new_spanned(ident, format!("Expected `{}`", expected))) } - } + }, _ => Err(syn_err("Wrong type of generic provided. 
Must be a `type`.")), - } - ).collect::>()?; + }) + .collect::>()?; let weight_type = types.pop().expect("Vector of length 3 can be popped; qed"); let target_type = types.pop().expect("Vector of length 2 can be popped; qed"); @@ -467,15 +473,15 @@ impl Parse for SolutionDef { let expr = count_expr.expr; let expr_lit = match *expr { syn::Expr::Lit(count_lit) => count_lit.lit, - _ => return Err(syn_err("Count must be literal.")) + _ => return Err(syn_err("Count must be literal.")), }; let int_lit = match expr_lit { syn::Lit::Int(int_lit) => int_lit, - _ => return Err(syn_err("Count must be int literal.")) + _ => return Err(syn_err("Count must be int literal.")), }; let count = int_lit.base10_parse::()?; - Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding } ) + Ok(Self { vis, ident, voter_type, target_type, weight_type, count, compact_encoding }) } } diff --git a/substrate/primitives/npos-elections/fuzzer/src/common.rs b/substrate/primitives/npos-elections/fuzzer/src/common.rs index fe237c930de17effb822602f17391ff8c8fa42eb..e97f7f7df8b1120f4b00f85c823705e7a0a28ed2 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/common.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/common.rs @@ -62,11 +62,7 @@ pub fn generate_random_npos_inputs( candidate_count: usize, voter_count: usize, mut rng: impl Rng, -) -> ( - usize, - Vec, - Vec<(AccountId, VoteWeight, Vec)>, -) { +) -> (usize, Vec, Vec<(AccountId, VoteWeight, Vec)>) { // cache for fast generation of unique candidate and voter ids let mut used_ids = HashSet::with_capacity(candidate_count + voter_count); diff --git a/substrate/primitives/npos-elections/fuzzer/src/compact.rs b/substrate/primitives/npos-elections/fuzzer/src/compact.rs index a49f6a535e5f0dc5a6519752a05ebe026d1322f5..b171765e783f7fa5b038bc19bcc12c1d3c4f1559 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/compact.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/compact.rs @@ -1,6 +1,5 @@ use honggfuzz::fuzz; -use sp_npos_elections::generate_solution_type; -use sp_npos_elections::sp_arithmetic::Percent; +use sp_npos_elections::{generate_solution_type, sp_arithmetic::Percent}; use sp_runtime::codec::{Encode, Error}; fn main() { @@ -26,9 +25,8 @@ fn main() { // The reencoded value should definitely be decodable (if unwrap() fails that is a valid // panic/finding for the fuzzer): let decoded2: InnerTestSolutionCompact = - ::decode( - &mut reencoded.as_slice(), - ).unwrap(); + ::decode(&mut reencoded.as_slice()) + .unwrap(); // And it should be equal to the original decoded object (resulting from directly // decoding fuzzer_data): assert_eq!(decoded, decoded2); diff --git a/substrate/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs b/substrate/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs index 4ff18e95d1ef1bdff7a94d8861a01c950cf5136a..04ff60683f9c0119697b5317adcd07b79dc792d7 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/phragmen_balancing.rs @@ -21,23 +21,17 @@ mod common; use common::*; use honggfuzz::fuzz; +use rand::{self, SeedableRng}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, is_score_better, seq_phragmen, to_supports, to_without_backing, EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; -use rand::{self, SeedableRng}; fn main() { loop { fuzz!(|data: (usize, usize, usize, usize, u64)| { - let ( - mut target_count, - mut voter_count, - mut iterations, - mut to_elect, - seed, - ) = 
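// Hedged sketch of the macro this crate implements, using the binding syntax
// the `SolutionDef` parser above expects (`VoterIndex`, `TargetIndex`,
// `Accuracy`, plus a literal edge count); it mirrors the compact fuzzer's
// invocation. The generated `len`/`unique_targets` helpers come from the
// `len_impl`/`unique_targets_impl` functions above.
use sp_npos_elections::{generate_solution_type, sp_arithmetic::Percent};

generate_solution_type!(
    #[compact]
    pub struct ExampleSolution::<VoterIndex = u32, TargetIndex = u16, Accuracy = Percent>(4)
);

fn main() {
    let solution = ExampleSolution::default();
    assert_eq!(solution.len(), 0);
    assert!(solution.unique_targets().is_empty());
}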
data; + let (mut target_count, mut voter_count, mut iterations, mut to_elect, seed) = data; let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 200); voter_count = to_range(voter_count, 100, 200); @@ -48,12 +42,7 @@ fn main() { "++ [voter_count: {} / target_count:{} / to_elect:{} / iterations:{}]", voter_count, target_count, to_elect, iterations, ); - let ( - unbalanced, - candidates, - voters, - stake_of_tree, - ) = generate_random_npos_result( + let (unbalanced, candidates, voters, stake_of_tree) = generate_random_npos_result( voter_count as u64, target_count as u64, to_elect, @@ -61,9 +50,7 @@ fn main() { ElectionType::Phragmen(None), ); - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; let unbalanced_score = { let staked = assignment_ratio_to_staked_normalized( @@ -76,7 +63,7 @@ fn main() { if score[0] == 0 { // such cases cannot be improved by balancing. - return; + return } score }; @@ -87,34 +74,32 @@ fn main() { candidates, voters, Some((iterations, 0)), - ).unwrap(); + ) + .unwrap(); let balanced_score = { let staked = assignment_ratio_to_staked_normalized( balanced.assignments.clone(), &stake_of, - ).unwrap(); + ) + .unwrap(); let winners = to_without_backing(balanced.winners); to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate() - }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); println!( "iter = {} // {:?} -> {:?} [{}]", - iterations, - unbalanced_score, - balanced_score, - enhance, + iterations, unbalanced_score, balanced_score, enhance, ); // The only guarantee of balancing is such that the first and third element of the score // cannot decrease. assert!( balanced_score[0] >= unbalanced_score[0] && - balanced_score[1] == unbalanced_score[1] && - balanced_score[2] <= unbalanced_score[2] + balanced_score[1] == unbalanced_score[1] && + balanced_score[2] <= unbalanced_score[2] ); } }); diff --git a/substrate/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs b/substrate/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs index 49794f21fb2562a7c931c6389adac2614629ba5c..6efc17f24f939c3a723ad00c8077973a249732d5 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/phragmen_pjr.rs @@ -37,7 +37,6 @@ //! //! Once a panic is found, it can be debugged with //! `HFUZZ_RUN_ARGS="-t 10" cargo hfuzz run-debug phragmen_pjr hfuzz_workspace/phragmen_pjr/*.fuzz`. -//! 
#[cfg(fuzzing)] use honggfuzz::fuzz; diff --git a/substrate/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs b/substrate/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs index 8ce7e7d415fa21cef506699b3920c1ead1a90f72..0d8a07489d310166373913be17879d331968cc20 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/phragmms_balancing.rs @@ -21,23 +21,17 @@ mod common; use common::*; use honggfuzz::fuzz; +use rand::{self, SeedableRng}; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, is_score_better, phragmms, to_supports, to_without_backing, EvaluateSupport, VoteWeight, }; use sp_runtime::Perbill; -use rand::{self, SeedableRng}; fn main() { loop { fuzz!(|data: (usize, usize, usize, usize, u64)| { - let ( - mut target_count, - mut voter_count, - mut iterations, - mut to_elect, - seed, - ) = data; + let (mut target_count, mut voter_count, mut iterations, mut to_elect, seed) = data; let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 200); voter_count = to_range(voter_count, 100, 200); @@ -48,12 +42,7 @@ fn main() { "++ [voter_count: {} / target_count:{} / to_elect:{} / iterations:{}]", voter_count, target_count, to_elect, iterations, ); - let ( - unbalanced, - candidates, - voters, - stake_of_tree, - ) = generate_random_npos_result( + let (unbalanced, candidates, voters, stake_of_tree) = generate_random_npos_result( voter_count as u64, target_count as u64, to_elect, @@ -61,9 +50,7 @@ fn main() { ElectionType::Phragmms(None), ); - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; let unbalanced_score = { let staked = assignment_ratio_to_staked_normalized( @@ -76,7 +63,7 @@ fn main() { if score[0] == 0 { // such cases cannot be improved by balancing. - return; + return } score }; @@ -86,34 +73,30 @@ fn main() { candidates, voters, Some((iterations, 0)), - ).unwrap(); + ) + .unwrap(); let balanced_score = { let staked = assignment_ratio_to_staked_normalized(balanced.assignments.clone(), &stake_of) .unwrap(); let winners = to_without_backing(balanced.winners); - to_supports(winners.as_ref(), staked.as_ref()) - .unwrap() - .evaluate() + to_supports(winners.as_ref(), staked.as_ref()).unwrap().evaluate() }; let enhance = is_score_better(balanced_score, unbalanced_score, Perbill::zero()); println!( "iter = {} // {:?} -> {:?} [{}]", - iterations, - unbalanced_score, - balanced_score, - enhance, + iterations, unbalanced_score, balanced_score, enhance, ); // The only guarantee of balancing is such that the first and third element of the score // cannot decrease. 
assert!( balanced_score[0] >= unbalanced_score[0] && - balanced_score[1] == unbalanced_score[1] && - balanced_score[2] <= unbalanced_score[2] + balanced_score[1] == unbalanced_score[1] && + balanced_score[2] <= unbalanced_score[2] ); }); } diff --git a/substrate/primitives/npos-elections/fuzzer/src/reduce.rs b/substrate/primitives/npos-elections/fuzzer/src/reduce.rs index 4ee2468d9d1400e8e2d68c2567127abe0cea8e99..a7e77fdd516a54ce61b437789149104f61eb3a22 100644 --- a/substrate/primitives/npos-elections/fuzzer/src/reduce.rs +++ b/substrate/primitives/npos-elections/fuzzer/src/reduce.rs @@ -34,8 +34,8 @@ use honggfuzz::fuzz; mod common; use common::to_range; -use sp_npos_elections::{reduce, to_support_map, ExtendedBalance, StakedAssignment}; use rand::{self, Rng, RngCore, SeedableRng}; +use sp_npos_elections::{reduce, to_support_map, ExtendedBalance, StakedAssignment}; type Balance = u128; type AccountId = u64; @@ -50,13 +50,8 @@ fn main() { let rng = rand::rngs::SmallRng::seed_from_u64(seed); target_count = to_range(target_count, 100, 1000); voter_count = to_range(voter_count, 100, 2000); - let (assignments, winners) = generate_random_phragmen_assignment( - voter_count, - target_count, - 8, - 8, - rng - ); + let (assignments, winners) = + generate_random_phragmen_assignment(voter_count, target_count, 8, 8, rng); reduce_and_compare(&assignments, &winners); }); } @@ -82,23 +77,27 @@ fn generate_random_phragmen_assignment( (1..=voter_count).for_each(|acc| { let mut targets_to_chose_from = all_targets.clone(); - let targets_to_chose = if edge_per_voter_var > 0 { rng.gen_range( - avg_edge_per_voter - edge_per_voter_var, - avg_edge_per_voter + edge_per_voter_var, - ) } else { avg_edge_per_voter }; - - let distribution = (0..targets_to_chose).map(|_| { - let target = targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); - if winners.iter().find(|w| **w == target).is_none() { - winners.push(target.clone()); - } - (target, rng.gen_range(1 * KSM, 100 * KSM)) - }).collect::>(); - - assignments.push(StakedAssignment { - who: (acc as AccountId), - distribution, - }); + let targets_to_chose = if edge_per_voter_var > 0 { + rng.gen_range( + avg_edge_per_voter - edge_per_voter_var, + avg_edge_per_voter + edge_per_voter_var, + ) + } else { + avg_edge_per_voter + }; + + let distribution = (0..targets_to_chose) + .map(|_| { + let target = + targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); + if winners.iter().find(|w| **w == target).is_none() { + winners.push(target.clone()); + } + (target, rng.gen_range(1 * KSM, 100 * KSM)) + }) + .collect::>(); + + assignments.push(StakedAssignment { who: (acc as AccountId), distribution }); }); (assignments, winners) @@ -117,10 +116,7 @@ fn assert_assignments_equal( } } -fn reduce_and_compare( - assignment: &Vec>, - winners: &Vec, -) { +fn reduce_and_compare(assignment: &Vec>, winners: &Vec) { let mut altered_assignment = assignment.clone(); let n = assignment.len() as u32; let m = winners.len() as u32; @@ -138,15 +134,13 @@ fn reduce_and_compare( num_changed, ); - assert_assignments_equal( - winners, - &assignment, - &altered_assignment, - ); + assert_assignments_equal(winners, &assignment, &altered_assignment); } fn assignment_len(assignments: &[StakedAssignment]) -> u32 { let mut counter = 0; - assignments.iter().for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); + assignments + .iter() + .for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); counter } diff --git 
a/substrate/primitives/npos-elections/src/assignments.rs b/substrate/primitives/npos-elections/src/assignments.rs index aacd01a03069277406aa56f43ac4dd489ea16891..b0dd29dc1904132ec96e659236eabcc1491b22c7 100644 --- a/substrate/primitives/npos-elections/src/assignments.rs +++ b/substrate/primitives/npos-elections/src/assignments.rs @@ -18,8 +18,11 @@ //! Structs and helpers for distributing a voter's stake among various winners. use crate::{Error, ExtendedBalance, IdentifierT, PerThing128, __OrInvalidIndex}; -use codec::{Encode, Decode}; -use sp_arithmetic::{traits::{Bounded, Zero}, Normalizable, PerThing}; +use codec::{Decode, Encode}; +use sp_arithmetic::{ + traits::{Bounded, Zero}, + Normalizable, PerThing, +}; use sp_core::RuntimeDebug; use sp_std::vec::Vec; @@ -61,10 +64,7 @@ impl Assignment { }) .collect::>(); - StakedAssignment { - who: self.who, - distribution, - } + StakedAssignment { who: self.who, distribution } } /// Try and normalize this assignment. @@ -83,12 +83,13 @@ impl Assignment { .map(|(_, p)| *p) .collect::>() .normalize(P::one()) - .map(|normalized_ratios| - self.distribution - .iter_mut() - .zip(normalized_ratios) - .for_each(|((_, old), corrected)| { *old = corrected; }) - ) + .map(|normalized_ratios| { + self.distribution.iter_mut().zip(normalized_ratios).for_each( + |((_, old), corrected)| { + *old = corrected; + }, + ) + }) } } @@ -118,7 +119,8 @@ impl StakedAssignment { AccountId: IdentifierT, { let stake = self.total(); - let distribution = self.distribution + let distribution = self + .distribution .into_iter() .filter_map(|(target, w)| { let per_thing = P::from_rational(w, stake); @@ -130,10 +132,7 @@ impl StakedAssignment { }) .collect::>(); - Assignment { - who: self.who, - distribution, - } + Assignment { who: self.who, distribution } } /// Try and normalize this assignment. @@ -152,12 +151,13 @@ impl StakedAssignment { .map(|(_, ref weight)| *weight) .collect::>() .normalize(stake) - .map(|normalized_weights| - self.distribution - .iter_mut() - .zip(normalized_weights.into_iter()) - .for_each(|((_, weight), corrected)| { *weight = corrected; }) - ) + .map(|normalized_weights| { + self.distribution.iter_mut().zip(normalized_weights.into_iter()).for_each( + |((_, weight), corrected)| { + *weight = corrected; + }, + ) + }) } /// Get the total stake of this assignment (aka voter budget). diff --git a/substrate/primitives/npos-elections/src/balancing.rs b/substrate/primitives/npos-elections/src/balancing.rs index 48cb980d78c338ac96cace691ea7cae1095bab35..378ebe8e84fd1f38bb65efa8986013035c274aa9 100644 --- a/substrate/primitives/npos-elections/src/balancing.rs +++ b/substrate/primitives/npos-elections/src/balancing.rs @@ -26,7 +26,7 @@ //! //! See [`balance`] for more information. -use crate::{IdentifierT, Voter, ExtendedBalance, Edge}; +use crate::{Edge, ExtendedBalance, IdentifierT, Voter}; use sp_arithmetic::traits::Zero; use sp_std::prelude::*; @@ -57,19 +57,23 @@ pub fn balance( iterations: usize, tolerance: ExtendedBalance, ) -> usize { - if iterations == 0 { return 0; } + if iterations == 0 { + return 0 + } let mut iter = 0; loop { let mut max_diff = 0; for voter in voters.iter_mut() { let diff = balance_voter(voter, tolerance); - if diff > max_diff { max_diff = diff; } + if diff > max_diff { + max_diff = diff; + } } iter += 1; if max_diff <= tolerance || iter >= iterations { - break iter; + break iter } } } @@ -80,7 +84,8 @@ pub(crate) fn balance_voter( tolerance: ExtendedBalance, ) -> ExtendedBalance { // create a shallow copy of the elected ones. 
The original one will not be used henceforth. - let mut elected_edges = voter.edges + let mut elected_edges = voter + .edges .iter_mut() .filter(|e| e.candidate.borrow().elected) .collect::>>(); @@ -91,9 +96,8 @@ pub(crate) fn balance_voter( } // amount of stake from this voter that is used in edges. - let stake_used = elected_edges - .iter() - .fold(0, |a: ExtendedBalance, e| a.saturating_add(e.weight)); + let stake_used = + elected_edges.iter().fold(0, |a: ExtendedBalance, e| a.saturating_add(e.weight)); // backed stake of each of the elected edges. let backed_stakes = elected_edges @@ -104,13 +108,7 @@ pub(crate) fn balance_voter( // backed stake of all the edges for whom we've spent some stake. let backing_backed_stake = elected_edges .iter() - .filter_map(|e| - if e.weight > 0 { - Some(e.candidate.borrow().backed_stake) - } else { - None - } - ) + .filter_map(|e| if e.weight > 0 { Some(e.candidate.borrow().backed_stake) } else { None }) .collect::>(); let difference = if backing_backed_stake.len() > 0 { @@ -125,7 +123,7 @@ pub(crate) fn balance_voter( let mut difference = max_stake.saturating_sub(*min_stake); difference = difference.saturating_add(voter.budget.saturating_sub(stake_used)); if difference < tolerance { - return difference; + return difference } difference } else { @@ -156,12 +154,18 @@ pub(crate) fn balance_voter( cumulative_backed_stake = cumulative_backed_stake.saturating_add(backed_stake); } - let last_stake = elected_edges.get(last_index).expect( - "length of elected_edges is greater than or equal 2; last_index index is at \ - the minimum elected_edges.len() - 1; index is within range; qed" - ).candidate.borrow().backed_stake; + let last_stake = elected_edges + .get(last_index) + .expect( + "length of elected_edges is greater than or equal 2; last_index index is at \ + the minimum elected_edges.len() - 1; index is within range; qed", + ) + .candidate + .borrow() + .backed_stake; let ways_to_split = last_index + 1; - let excess = voter.budget + let excess = voter + .budget .saturating_add(cumulative_backed_stake) .saturating_sub(last_stake.saturating_mul(ways_to_split as ExtendedBalance)); diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs index 9fdf76118f89f25231296720921cd245c2bf828c..5b02eaf2ad2e83b733428b208f8aa86bf7a07d13 100644 --- a/substrate/primitives/npos-elections/src/helpers.rs +++ b/substrate/primitives/npos-elections/src/helpers.rs @@ -17,7 +17,9 @@ //! Helper methods for npos-elections. 
-use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight, WithApprovalOf}; +use crate::{ + Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight, WithApprovalOf, +}; use sp_arithmetic::PerThing; use sp_std::prelude::*; @@ -52,7 +54,8 @@ where staked .iter_mut() .map(|a| { - a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) + a.try_normalize(stake_of(&a.who).into()) + .map_err(|err| Error::ArithmeticError(err)) }) .collect::>()?; Ok(staked) @@ -113,14 +116,8 @@ mod tests { assert_eq!( staked, vec![ - StakedAssignment { - who: 1u32, - distribution: vec![(10u32, 50), (20, 50),] - }, - StakedAssignment { - who: 2u32, - distribution: vec![(10u32, 33), (20, 67),] - } + StakedAssignment { who: 1u32, distribution: vec![(10u32, 50), (20, 50),] }, + StakedAssignment { who: 2u32, distribution: vec![(10u32, 33), (20, 67),] } ] ); } diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index c1cf41a40f2b53aa40cbb4f955f02d73b8aa9912..ece5be33b114a46337add0e68cdc3e12e0639629 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -78,6 +78,7 @@ use sp_arithmetic::{ traits::{Bounded, UniqueSaturatedInto, Zero}, Normalizable, PerThing, Rational128, ThresholdOrd, }; +use sp_core::RuntimeDebug; use sp_std::{ cell::RefCell, cmp::Ordering, @@ -88,7 +89,6 @@ use sp_std::{ prelude::*, rc::Rc, }; -use sp_core::RuntimeDebug; use codec::{Decode, Encode}; #[cfg(feature = "std")] @@ -100,21 +100,21 @@ mod mock; mod tests; mod assignments; -pub mod phragmen; pub mod balancing; -pub mod phragmms; -pub mod node; -pub mod reduce; pub mod helpers; +pub mod node; +pub mod phragmen; +pub mod phragmms; pub mod pjr; +pub mod reduce; -pub use assignments::{Assignment, IndexAssignment, StakedAssignment, IndexAssignmentOf}; -pub use reduce::reduce; +pub use assignments::{Assignment, IndexAssignment, IndexAssignmentOf, StakedAssignment}; +pub use balancing::*; pub use helpers::*; pub use phragmen::*; pub use phragmms::*; -pub use balancing::*; pub use pjr::*; +pub use reduce::reduce; // re-export the compact macro, with the dependencies of the macro. #[doc(hidden)] @@ -206,9 +206,7 @@ where /// Get the average edge count. fn average_edge_count(&self) -> usize { - self.edge_count() - .checked_div(self.voter_count()) - .unwrap_or(0) + self.edge_count().checked_div(self.voter_count()).unwrap_or(0) } /// Remove a certain voter. @@ -379,9 +377,14 @@ impl Voter { .into_iter() .filter_map(|e| { let per_thing = P::from_rational(e.weight, budget); - // trim zero edges. - if per_thing.is_zero() { None } else { Some((e.who, per_thing)) } - }).collect::>(); + // trim zero edges. + if per_thing.is_zero() { + None + } else { + Some((e.who, per_thing)) + } + }) + .collect::>(); if distribution.len() > 0 { Some(Assignment { who, distribution }) @@ -611,10 +614,7 @@ pub fn is_score_better(this: ElectionScore, that: ElectionScore, ep match this .iter() .zip(that.iter()) - .map(|(thi, tha)| ( - thi.ge(&tha), - thi.tcmp(&tha, epsilon.mul_ceil(*tha)), - )) + .map(|(thi, tha)| (thi.ge(&tha), thi.tcmp(&tha, epsilon.mul_ceil(*tha)))) .collect::>() .as_slice() { @@ -653,40 +653,34 @@ pub fn setup_inputs( }) .collect::>>(); - let voters = initial_voters.into_iter().filter_map(|(who, voter_stake, votes)| { - let mut edges: Vec> = Vec::with_capacity(votes.len()); - for v in votes { - if edges.iter().any(|e| e.who == v) { - // duplicate edge. 
- continue; - } - if let Some(idx) = c_idx_cache.get(&v) { - // This candidate is valid + already cached. - let mut candidate = candidates[*idx].borrow_mut(); - candidate.approval_stake = - candidate.approval_stake.saturating_add(voter_stake.into()); - edges.push( - Edge { + let voters = initial_voters + .into_iter() + .filter_map(|(who, voter_stake, votes)| { + let mut edges: Vec> = Vec::with_capacity(votes.len()); + for v in votes { + if edges.iter().any(|e| e.who == v) { + // duplicate edge. + continue + } + if let Some(idx) = c_idx_cache.get(&v) { + // This candidate is valid + already cached. + let mut candidate = candidates[*idx].borrow_mut(); + candidate.approval_stake = + candidate.approval_stake.saturating_add(voter_stake.into()); + edges.push(Edge { who: v.clone(), candidate: Rc::clone(&candidates[*idx]), ..Default::default() - } - ); - } // else {} would be wrong votes. We don't really care about it. - } - if edges.is_empty() { - None - } - else { - Some(Voter { - who, - edges: edges, - budget: voter_stake.into(), - load: Rational128::zero(), - }) - } - - }).collect::>(); + }); + } // else {} would be wrong votes. We don't really care about it. + } + if edges.is_empty() { + None + } else { + Some(Voter { who, edges, budget: voter_stake.into(), load: Rational128::zero() }) + } + }) + .collect::>(); - (candidates, voters,) + (candidates, voters) } diff --git a/substrate/primitives/npos-elections/src/mock.rs b/substrate/primitives/npos-elections/src/mock.rs index 363550ed8efccabb9cd856f9f697d3692d1a7a09..1be591e4ea6f55169233ad3018ffd6013eeea23a 100644 --- a/substrate/primitives/npos-elections/src/mock.rs +++ b/substrate/primitives/npos-elections/src/mock.rs @@ -20,12 +20,12 @@ #![cfg(any(test, mocks))] use std::{ - collections::{HashSet, HashMap}, + collections::{HashMap, HashSet}, convert::TryInto, hash::Hash, }; -use rand::{self, Rng, seq::SliceRandom}; +use rand::{self, seq::SliceRandom, Rng}; use sp_arithmetic::{ traits::{One, SaturatedConversion, Zero}, PerThing, @@ -33,7 +33,7 @@ use sp_arithmetic::{ use sp_runtime::assert_eq_error_rate; use sp_std::collections::btree_map::BTreeMap; -use crate::{Assignment, ElectionResult, ExtendedBalance, PerThing128, VoteWeight, seq_phragmen}; +use crate::{seq_phragmen, Assignment, ElectionResult, ExtendedBalance, PerThing128, VoteWeight}; sp_npos_elections_compact::generate_solution_type!( #[compact] @@ -87,7 +87,7 @@ pub(crate) type _SupportMap = BTreeMap>; #[derive(Debug, Clone)] pub(crate) struct _ElectionResult { pub winners: Vec<(A, ExtendedBalance)>, - pub assignments: Vec<(A, Vec<_Assignment>)> + pub assignments: Vec<(A, Vec<_Assignment>)>, } pub(crate) fn auto_generate_self_voters(candidates: &[A]) -> Vec<(A, Vec)> { @@ -99,7 +99,8 @@ pub(crate) fn elect_float( initial_candidates: Vec, initial_voters: Vec<(A, Vec)>, stake_of: impl Fn(&A) -> VoteWeight, -) -> Option<_ElectionResult> where +) -> Option<_ElectionResult> +where A: Default + Ord + Copy, { let mut elected_candidates: Vec<(A, ExtendedBalance)>; @@ -123,17 +124,10 @@ pub(crate) fn elect_float( for v in votes { if let Some(idx) = c_idx_cache.get(&v) { candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push( - _Edge { who: v.clone(), candidate_index: *idx, ..Default::default() } - ); + edges.push(_Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); } } - _Voter { - who, - edges: edges, - budget: voter_stake, - load: 0f64, - } + _Voter { who, edges, budget: voter_stake, load: 0f64 } })); let to_elect = 
candidate_count.min(candidates.len()); @@ -179,7 +173,9 @@ pub(crate) fn elect_float( for n in &mut voters { let mut assignment = (n.who.clone(), vec![]); for e in &mut n.edges { - if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { + if let Some(c) = + elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) + { if c != n.who { let ratio = e.load / n.load; assignment.1.push((e.who.clone(), ratio)); @@ -191,10 +187,7 @@ pub(crate) fn elect_float( } } - Some(_ElectionResult { - winners: elected_candidates, - assignments: assigned, - }) + Some(_ElectionResult { winners: elected_candidates, assignments: assigned }) } pub(crate) fn equalize_float( @@ -211,18 +204,14 @@ pub(crate) fn equalize_float( let mut max_diff = 0.0; for (voter, assignment) in assignments.iter_mut() { let voter_budget = stake_of(&voter); - let diff = do_equalize_float( - voter, - voter_budget, - assignment, - supports, - tolerance, - ); - if diff > max_diff { max_diff = diff; } + let diff = do_equalize_float(voter, voter_budget, assignment, supports, tolerance); + if diff > max_diff { + max_diff = diff; + } } if max_diff < tolerance { - break; + break } } } @@ -232,21 +221,20 @@ pub(crate) fn do_equalize_float( budget_balance: VoteWeight, elected_edges: &mut Vec<_Assignment>, support_map: &mut _SupportMap, - tolerance: f64 -) -> f64 where + tolerance: f64, +) -> f64 +where A: Ord + Clone, { let budget = budget_balance as f64; - if elected_edges.is_empty() { return 0.0; } + if elected_edges.is_empty() { + return 0.0 + } - let stake_used = elected_edges - .iter() - .fold(0.0, |s, e| s + e.1); + let stake_used = elected_edges.iter().fold(0.0, |s, e| s + e.1); - let backed_stakes_iter = elected_edges - .iter() - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total); + let backed_stakes_iter = + elected_edges.iter().filter_map(|e| support_map.get(&e.0)).map(|e| e.total); let backing_backed_stake = elected_edges .iter() @@ -268,7 +256,7 @@ pub(crate) fn do_equalize_float( difference = max_stake - min_stake; difference = difference + budget - stake_used; if difference < tolerance { - return difference; + return difference } } else { difference = budget; @@ -283,11 +271,12 @@ pub(crate) fn do_equalize_float( e.1 = 0.0; }); - elected_edges.sort_by(|x, y| - support_map.get(&x.0) + elected_edges.sort_by(|x, y| { + support_map + .get(&x.0) .and_then(|x| support_map.get(&y.0).and_then(|y| x.total.partial_cmp(&y.total))) .unwrap_or(sp_std::cmp::Ordering::Equal) - ); + }); let mut cumulative_stake = 0.0; let mut last_index = elected_edges.len() - 1; @@ -318,20 +307,22 @@ pub(crate) fn do_equalize_float( difference } - -pub(crate) fn create_stake_of(stakes: &[(AccountId, VoteWeight)]) - -> impl Fn(&AccountId) -> VoteWeight -{ +pub(crate) fn create_stake_of( + stakes: &[(AccountId, VoteWeight)], +) -> impl Fn(&AccountId) -> VoteWeight { let mut storage = BTreeMap::::new(); - stakes.iter().for_each(|s| { storage.insert(s.0, s.1); }); + stakes.iter().for_each(|s| { + storage.insert(s.0, s.1); + }); move |who: &AccountId| -> VoteWeight { storage.get(who).unwrap().to_owned() } } - pub fn check_assignments_sum(assignments: &[Assignment]) { for Assignment { distribution, .. 
} in assignments { let mut sum: u128 = Zero::zero(); - distribution.iter().for_each(|(_, p)| sum += p.deconstruct().saturated_into::()); + distribution + .iter() + .for_each(|(_, p)| sum += p.deconstruct().saturated_into::()); assert_eq!(sum, T::ACCURACY.saturated_into(), "Assignment ratio sum is not 100%"); } } @@ -341,8 +332,7 @@ pub(crate) fn run_and_compare( voters: Vec<(AccountId, Vec)>, stake_of: FS, to_elect: usize, -) -where +) where Output: PerThing128, FS: Fn(&AccountId) -> VoteWeight, { @@ -350,24 +340,28 @@ where let ElectionResult { winners, assignments } = seq_phragmen::<_, Output>( to_elect, candidates.clone(), - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - None - ).unwrap(); + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + None, + ) + .unwrap(); // run float poc code. - let truth_value = elect_float( - to_elect, - candidates, - voters, - &stake_of, - ).unwrap(); + let truth_value = elect_float(to_elect, candidates, voters, &stake_of).unwrap(); - assert_eq!(winners.iter().map(|(x, _)| x).collect::>(), truth_value.winners.iter().map(|(x, _)| x).collect::>()); + assert_eq!( + winners.iter().map(|(x, _)| x).collect::>(), + truth_value.winners.iter().map(|(x, _)| x).collect::>() + ); for Assignment { who, distribution } in assignments.iter() { if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == *who) { for (candidate, per_thingy) in distribution { - if let Some(float_assignment) = float_assignments.1.iter().find(|x| x.0 == *candidate ) { + if let Some(float_assignment) = + float_assignments.1.iter().find(|x| x.0 == *candidate) + { assert_eq_error_rate!( Output::from_float(float_assignment.1).deconstruct(), per_thingy.deconstruct(), @@ -376,8 +370,7 @@ where } else { panic!( "candidate mismatch. This should never happen. 
could not find ({:?}, {:?})", - candidate, - per_thingy, + candidate, per_thingy, ) } } @@ -394,13 +387,10 @@ pub(crate) fn build_support_map_float( stake_of: impl Fn(&AccountId) -> VoteWeight, ) -> _SupportMap { let mut supports = <_SupportMap>::new(); - result.winners - .iter() - .map(|(e, _)| (e, stake_of(e) as f64)) - .for_each(|(e, s)| { - let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(e.clone(), item); - }); + result.winners.iter().map(|(e, _)| (e, stake_of(e) as f64)).for_each(|(e, s)| { + let item = _Support { own: s, total: s, ..Default::default() }; + supports.insert(e.clone(), item); + }); for (n, assignment) in result.assignments.iter_mut() { for (c, r) in assignment.iter_mut() { diff --git a/substrate/primitives/npos-elections/src/node.rs b/substrate/primitives/npos-elections/src/node.rs index ae65318ff0461e756a76f3848f3752c8638a0b24..ac03f547d2cbd9a7e8bdeb082eff5f541cc641b9 100644 --- a/substrate/primitives/npos-elections/src/node.rs +++ b/substrate/primitives/npos-elections/src/node.rs @@ -55,11 +55,7 @@ impl sp_std::fmt::Debug for NodeId { f, "Node({:?}, {:?})", self.who, - if self.role == NodeRole::Voter { - "V" - } else { - "T" - } + if self.role == NodeRole::Voter { "V" } else { "T" } ) } } @@ -84,12 +80,7 @@ impl Eq for Node {} #[cfg(feature = "std")] impl fmt::Debug for Node { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "({:?} --> {:?})", - self.id, - self.parent.as_ref().map(|p| p.borrow().id.clone()) - ) + write!(f, "({:?} --> {:?})", self.id, self.parent.as_ref().map(|p| p.borrow().id.clone())) } } @@ -102,7 +93,7 @@ impl Node { /// Returns true if `other` is the parent of `who`. pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { if who.borrow().parent.is_none() { - return false; + return false } who.borrow().parent.as_ref() == Some(other) } @@ -136,7 +127,7 @@ impl Node { while let Some(ref next_parent) = current.clone().borrow().parent { if visited.contains(next_parent) { - break; + break } parent_path.push(next_parent.clone()); current = next_parent.clone(); @@ -164,16 +155,7 @@ mod tests { #[test] fn basic_create_works() { let node = Node::new(id(10)); - assert_eq!( - node, - Node { - id: NodeId { - who: 10, - role: NodeRole::Target - }, - parent: None - } - ); + assert_eq!(node, Node { id: NodeId { who: 10, role: NodeRole::Target }, parent: None }); } #[test] @@ -194,9 +176,9 @@ mod tests { #[test] fn get_root_works() { - // D <-- A <-- B <-- C - // \ - // <-- E + // D <-- A <-- B <-- C + // \ + // <-- E let a = Node::new(id(1)).into_ref(); let b = Node::new(id(2)).into_ref(); let c = Node::new(id(3)).into_ref(); @@ -209,29 +191,20 @@ mod tests { Node::set_parent_of(&e, &a); Node::set_parent_of(&a, &d); - assert_eq!( - Node::root(&e), - (d.clone(), vec![e.clone(), a.clone(), d.clone()]), - ); + assert_eq!(Node::root(&e), (d.clone(), vec![e.clone(), a.clone(), d.clone()]),); assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()]),); - assert_eq!( - Node::root(&c), - (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]), - ); + assert_eq!(Node::root(&c), (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]),); - // D A <-- B <-- C - // F <-- / \ - // <-- E + // D A <-- B <-- C + // F <-- / \ + // <-- E Node::set_parent_of(&a, &f); assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()]),); - assert_eq!( - Node::root(&c), - (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()]), - ); + assert_eq!(Node::root(&c), (f.clone(), vec![c.clone(), 
b.clone(), a.clone(), f.clone()]),); } #[test] diff --git a/substrate/primitives/npos-elections/src/phragmen.rs b/substrate/primitives/npos-elections/src/phragmen.rs index bbead91c938f8c0be4f021c104c9aae4ee78466b..0f9b1449197612f5061d4f3d5f8a7c36a7a901ec 100644 --- a/substrate/primitives/npos-elections/src/phragmen.rs +++ b/substrate/primitives/npos-elections/src/phragmen.rs @@ -75,11 +75,7 @@ pub fn seq_phragmen( ) -> Result, crate::Error> { let (candidates, voters) = setup_inputs(initial_candidates, initial_voters); - let (candidates, mut voters) = seq_phragmen_core::( - rounds, - candidates, - voters, - )?; + let (candidates, mut voters) = seq_phragmen_core::(rounds, candidates, voters)?; if let Some((iterations, tolerance)) = balance { // NOTE: might create zero-edges, but we will strip them again when we convert voter into @@ -152,7 +148,8 @@ pub fn seq_phragmen_core( voter.load.n(), voter.budget, candidate.approval_stake, - ).unwrap_or(Bounded::max_value()); + ) + .unwrap_or(Bounded::max_value()); let temp_d = voter.load.d(); let temp = Rational128::from(temp_n, temp_d); candidate.score = candidate.score.lazy_saturating_add(temp); @@ -188,13 +185,9 @@ pub fn seq_phragmen_core( for edge in &mut voter.edges { if edge.candidate.borrow().elected { // update internal state. - edge.weight = multiply_by_rational( - voter.budget, - edge.load.n(), - voter.load.n(), - ) - // If result cannot fit in u128. Not much we can do about it. - .unwrap_or(Bounded::max_value()); + edge.weight = multiply_by_rational(voter.budget, edge.load.n(), voter.load.n()) + // If result cannot fit in u128. Not much we can do about it. + .unwrap_or(Bounded::max_value()); } else { edge.weight = 0 } diff --git a/substrate/primitives/npos-elections/src/phragmms.rs b/substrate/primitives/npos-elections/src/phragmms.rs index 2a643d3673a528afb122f386fc5148906bfe6f53..95551d9761fcb7f40d03963fcc67f7e5f78777a0 100644 --- a/substrate/primitives/npos-elections/src/phragmms.rs +++ b/substrate/primitives/npos-elections/src/phragmms.rs @@ -1,4 +1,4 @@ - // This file is part of Substrate. +// This file is part of Substrate. // Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -22,10 +22,10 @@ //! MMS algorithm. use crate::{ - IdentifierT, ElectionResult, ExtendedBalance, setup_inputs, VoteWeight, Voter, CandidatePtr, - balance, PerThing128, + balance, setup_inputs, CandidatePtr, ElectionResult, ExtendedBalance, IdentifierT, PerThing128, + VoteWeight, Voter, }; -use sp_arithmetic::{PerThing, Rational128, traits::Bounded}; +use sp_arithmetic::{traits::Bounded, PerThing, Rational128}; use sp_std::{prelude::*, rc::Rc}; /// Execute the phragmms method. 
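(Editorial note — the hunks below only reformat the body of `phragmms`; its behaviour is unchanged. For orientation, here is a minimal, self-contained sketch of how the function is driven, mirroring the `basic_election_works` test reformatted later in this file's diff. The concrete candidates, voters, and expected winners are copied from that test; the `use` paths assume the re-exports set up in `lib.rs` above.)

use sp_npos_elections::{phragmms, ElectionResult};
use sp_runtime::Perbill;

fn main() {
    // Candidates are plain account ids; voters are (who, budget, votes) triples.
    let candidates = vec![1u64, 2, 3];
    let voters = vec![(10u64, 10u64, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])];

    // Elect 2 winners; balance with at most 2 iterations and zero tolerance.
    let ElectionResult { winners, assignments } =
        phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap();

    // Same expectation as `basic_election_works`: 3 and 2 win, each backed by 30.
    assert_eq!(winners, vec![(3, 30), (2, 30)]);
    println!("{:?}", assignments);
}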
@@ -62,15 +62,17 @@ pub fn phragmms( balance(&mut voters, iterations, tolerance); } } else { - break; + break } } - let mut assignments = voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); + let mut assignments = + voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); let _ = assignments.iter_mut().map(|a| a.try_normalize()).collect::>()?; - let winners = winners.into_iter().map(|w_ptr| - (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake) - ).collect(); + let winners = winners + .into_iter() + .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) + .collect(); Ok(ElectionResult { winners, assignments }) } @@ -101,10 +103,8 @@ pub(crate) fn calculate_max_score( for edge in voter.edges.iter() { let edge_candidate = edge.candidate.borrow(); if edge_candidate.elected { - let edge_contribution: ExtendedBalance = P::from_rational( - edge.weight, - edge_candidate.backed_stake, - ).deconstruct().into(); + let edge_contribution: ExtendedBalance = + P::from_rational(edge.weight, edge_candidate.backed_stake).deconstruct().into(); denominator_contribution += edge_contribution; } } @@ -125,7 +125,7 @@ pub(crate) fn calculate_max_score( for c_ptr in candidates.iter() { let mut candidate = c_ptr.borrow_mut(); - if candidate.approval_stake > 0 { + if candidate.approval_stake > 0 { // finalise the score value. let score_d = candidate.score.d(); let one: ExtendedBalance = P::ACCURACY.into(); @@ -153,7 +153,10 @@ pub(crate) fn calculate_max_score( // `RationalInfinite` as the score type does not introduce significant overhead. Then we // can switch the score type to `RationalInfinite` and ensure compatibility with any // crazy token scale. - let score_n = candidate.approval_stake.checked_mul(one).unwrap_or_else(|| Bounded::max_value()); + let score_n = candidate + .approval_stake + .checked_mul(one) + .unwrap_or_else(|| Bounded::max_value()); candidate.score = Rational128::from(score_n, score_d); // check if we have a new winner. @@ -180,7 +183,10 @@ pub(crate) fn apply_elected( elected_ptr: CandidatePtr, ) { let elected_who = elected_ptr.borrow().who.clone(); - let cutoff = elected_ptr.borrow().score.to_den(1) + let cutoff = elected_ptr + .borrow() + .score + .to_den(1) .expect("(n / d) < u128::MAX and (n' / 1) == (n / d), thus n' < u128::MAX'; qed.") .n(); @@ -193,18 +199,19 @@ pub(crate) fn apply_elected( elected_backed_stake = elected_backed_stake.saturating_add(new_edge_weight); // Iterate over all other edges. - for (_, edge) in voter.edges - .iter_mut() - .enumerate() - .filter(|(edge_index, edge_inner)| *edge_index != new_edge_index && edge_inner.weight > 0) - { + for (_, edge) in + voter.edges.iter_mut().enumerate().filter(|(edge_index, edge_inner)| { + *edge_index != new_edge_index && edge_inner.weight > 0 + }) { let mut edge_candidate = edge.candidate.borrow_mut(); if edge_candidate.backed_stake > cutoff { - let stake_to_take = edge.weight.saturating_mul(cutoff) / edge_candidate.backed_stake.max(1); + let stake_to_take = + edge.weight.saturating_mul(cutoff) / edge_candidate.backed_stake.max(1); // subtract this amount from this edge. edge.weight = edge.weight.saturating_sub(stake_to_take); - edge_candidate.backed_stake = edge_candidate.backed_stake.saturating_sub(stake_to_take); + edge_candidate.backed_stake = + edge_candidate.backed_stake.saturating_sub(stake_to_take); // inject it into the outer loop's edge. 
elected_backed_stake = elected_backed_stake.saturating_add(stake_to_take); @@ -223,7 +230,7 @@ pub(crate) fn apply_elected( #[cfg(test)] mod tests { use super::*; - use crate::{ElectionResult, Assignment}; + use crate::{Assignment, ElectionResult}; use sp_runtime::{Perbill, Percent}; use sp_std::rc::Rc; @@ -232,32 +239,31 @@ mod tests { //! Manually run the internal steps of phragmms. In each round we select a new winner by //! `max_score`, then apply this change by `apply_elected`, and finally do a `balance` round. let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; let (candidates, mut voters) = setup_inputs(candidates, voters); // Round 1 - let winner = calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); + let winner = + calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); assert_eq!(winner.borrow().who, 3); assert_eq!(winner.borrow().score, 50u32.into()); apply_elected(&mut voters, Rc::clone(&winner)); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 0), (3, 30)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); @@ -270,30 +276,34 @@ mod tests { balance(&mut voters, 10, 0); // round 2 - let winner = calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); + let winner = + calculate_max_score::(candidates.as_ref(), voters.as_ref()).unwrap(); assert_eq!(winner.borrow().who, 2); assert_eq!(winner.borrow().score, 25u32.into()); apply_elected(&mut voters, Rc::clone(&winner)); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 15), (3, 15)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); assert_eq!( - voters.iter().find(|x| x.who == 10).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 10) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (10, vec![(1, 0), (2, 10)]), ); @@ -306,24 +316,27 @@ mod tests { balance(&mut voters, 10, 0); assert_eq!( - voters.iter().find(|x| x.who == 30).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 30) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (30, vec![(2, 20), (3, 10)]), ); assert_eq!( - voters.iter().find(|x| x.who == 20).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + 
.iter() + .find(|x| x.who == 20) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (20, vec![(1, 0), (3, 20)]), ); assert_eq!( - voters.iter().find(|x| x.who == 10).map(|v| ( - v.who, - v.edges.iter().map(|e| (e.who, e.weight)).collect::>() - )).unwrap(), + voters + .iter() + .find(|x| x.who == 10) + .map(|v| (v.who, v.edges.iter().map(|e| (e.who, e.weight)).collect::>())) + .unwrap(), (10, vec![(1, 0), (2, 10)]), ); } @@ -331,25 +344,16 @@ mod tests { #[test] fn basic_election_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; - let ElectionResult { winners, assignments } = phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); + let ElectionResult { winners, assignments } = + phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); assert_eq!(winners, vec![(3, 30), (2, 30)]); assert_eq!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::one())], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 20, distribution: vec![(3, Perbill::one())] }, Assignment { who: 30, distribution: vec![ @@ -374,13 +378,9 @@ mod tests { (130, 1000, vec![61, 71]), ]; - let ElectionResult { winners, assignments: _ } = phragmms::<_, Perbill>(4, candidates, voters, Some((2, 0))).unwrap(); - assert_eq!(winners, vec![ - (11, 3000), - (31, 2000), - (51, 1500), - (61, 1500), - ]); + let ElectionResult { winners, assignments: _ } = + phragmms::<_, Perbill>(4, candidates, voters, Some((2, 0))).unwrap(); + assert_eq!(winners, vec![(11, 3000), (31, 2000), (51, 1500), (61, 1500),]); } #[test] @@ -391,7 +391,8 @@ mod tests { // give a bit more to 1 and 3. voters.push((2, u64::MAX, vec![1, 3])); - let ElectionResult { winners, assignments: _ } = phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); + let ElectionResult { winners, assignments: _ } = + phragmms::<_, Perbill>(2, candidates, voters, Some((2, 0))).unwrap(); assert_eq!(winners.into_iter().map(|(w, _)| w).collect::>(), vec![1u32, 3]); } } diff --git a/substrate/primitives/npos-elections/src/pjr.rs b/substrate/primitives/npos-elections/src/pjr.rs index 290110b14e6501dc2a8942454042902ed97f1466..3cc99b33aa57c13a8e8c20dd22ef148d8237191a 100644 --- a/substrate/primitives/npos-elections/src/pjr.rs +++ b/substrate/primitives/npos-elections/src/pjr.rs @@ -1,4 +1,4 @@ - // This file is part of Substrate. +// This file is part of Substrate. // Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 @@ -23,20 +23,11 @@ //! See [`pjr_check`] which is the main entry point of the module. use crate::{ - Candidate, - CandidatePtr, - Edge, - ExtendedBalance, - IdentifierT, - Support, - SupportMap, - Supports, - Voter, - VoteWeight, + Candidate, CandidatePtr, Edge, ExtendedBalance, IdentifierT, Support, SupportMap, Supports, + VoteWeight, Voter, }; -use sp_std::{rc::Rc, vec::Vec}; -use sp_std::collections::btree_map::BTreeMap; use sp_arithmetic::{traits::Zero, Perbill}; +use sp_std::{collections::btree_map::BTreeMap, rc::Rc, vec::Vec}; /// The type used as the threshold. 
/// /// Just some reading sugar; Must always be same as [`ExtendedBalance`]; @@ -60,10 +51,8 @@ pub fn standard_threshold( ) -> Threshold { weights .into_iter() - .fold(Threshold::zero(), |acc, elem| { - acc.saturating_add(elem) - }) - / committee_size.max(1) as Threshold + .fold(Threshold::zero(), |acc, elem| acc.saturating_add(elem)) / + committee_size.max(1) as Threshold } /// Check a solution to be PJR. @@ -74,7 +63,10 @@ pub fn pjr_check( all_candidates: Vec, all_voters: Vec<(AccountId, VoteWeight, Vec)>, ) -> Result<(), AccountId> { - let t = standard_threshold(supports.len(), all_voters.iter().map(|voter| voter.1 as ExtendedBalance)); + let t = standard_threshold( + supports.len(), + all_voters.iter().map(|voter| voter.1 as ExtendedBalance), + ); t_pjr_check(supports, all_candidates, all_voters, t) } @@ -101,7 +93,6 @@ pub fn pjr_check( /// needs to inspect un-elected candidates and edges, thus `all_candidates` and `all_voters`. /// /// [NPoS]: https://arxiv.org/pdf/2004.12990v1.pdf -// // ### Implementation Notes // // The paper uses mathematical notation, which priorities single-symbol names. For programmer ease, @@ -120,11 +111,7 @@ pub fn t_pjr_check( t: Threshold, ) -> Result<(), AccountId> { // First order of business: derive `(candidates, voters)` from `supports`. - let (candidates, voters) = prepare_pjr_input( - supports, - all_candidates, - all_voters, - ); + let (candidates, voters) = prepare_pjr_input(supports, all_candidates, all_voters); // compute with threshold t. pjr_check_core(candidates.as_ref(), voters.as_ref(), t) } @@ -141,7 +128,9 @@ pub fn pjr_check_core( t: Threshold, ) -> Result<(), AccountId> { let unelected = candidates.iter().filter(|c| !c.borrow().elected); - let maybe_max_pre_score = unelected.map(|c| (pre_score(Rc::clone(c), voters, t), c.borrow().who.clone())).max(); + let maybe_max_pre_score = unelected + .map(|c| (pre_score(Rc::clone(c), voters, t), c.borrow().who.clone())) + .max(); // if unelected is empty then the solution is indeed PJR. match maybe_max_pre_score { Some((max_pre_score, counter_example)) if max_pre_score >= t => Err(counter_example), @@ -165,7 +154,10 @@ pub fn validate_pjr_challenge( all_candidates: Vec, all_voters: Vec<(AccountId, VoteWeight, Vec)>, ) -> bool { - let threshold = standard_threshold(supports.len(), all_voters.iter().map(|voter| voter.1 as ExtendedBalance)); + let threshold = standard_threshold( + supports.len(), + all_voters.iter().map(|voter| voter.1 as ExtendedBalance), + ); validate_t_pjr_challenge(counter_example, supports, all_candidates, all_voters, threshold) } @@ -186,11 +178,7 @@ pub fn validate_t_pjr_challenge( all_voters: Vec<(AccountId, VoteWeight, Vec)>, threshold: Threshold, ) -> bool { - let (candidates, voters) = prepare_pjr_input( - supports, - all_candidates, - all_voters, - ); + let (candidates, voters) = prepare_pjr_input(supports, all_candidates, all_voters); validate_pjr_challenge_core(counter_example, &candidates, &voters, threshold) } @@ -219,10 +207,11 @@ fn validate_pjr_challenge_core( // unsafe code leveraging the existing `candidates_index`: allocate an uninitialized vector of // appropriate length, then copy in all the elements. We'd really prefer to avoid unsafe code // in the runtime, though. 
- let candidate = match candidates.iter().find(|candidate| candidate.borrow().who == counter_example) { - None => return false, - Some(candidate) => candidate.clone(), - }; + let candidate = + match candidates.iter().find(|candidate| candidate.borrow().who == counter_example) { + None => return false, + Some(candidate) => candidate.clone(), + }; pre_score(candidate, &voters, threshold) >= threshold } @@ -261,10 +250,14 @@ fn prepare_pjr_input( let mut candidates_index: BTreeMap = BTreeMap::new(); // dump the staked assignments in a voter-major map for faster access down the road. - let mut assignment_map: BTreeMap> = BTreeMap::new(); + let mut assignment_map: BTreeMap> = + BTreeMap::new(); for (winner_id, Support { voters, .. }) in supports.iter() { for (voter_id, support) in voters.iter() { - assignment_map.entry(voter_id.clone()).or_default().push((winner_id.clone(), *support)); + assignment_map + .entry(voter_id.clone()) + .or_default() + .push((winner_id.clone(), *support)); } } @@ -282,47 +275,56 @@ fn prepare_pjr_input( let supports: SupportMap = supports.iter().cloned().collect(); // collect all candidates and winners into a unified `Vec`. - let candidates = all_candidates.into_iter().enumerate().map(|(i, c)| { - candidates_index.insert(c.clone(), i); + let candidates = all_candidates + .into_iter() + .enumerate() + .map(|(i, c)| { + candidates_index.insert(c.clone(), i); - // set the backing value and elected flag if the candidate is among the winners. - let who = c; - let maybe_support = supports.get(&who); - let elected = maybe_support.is_some(); - let backed_stake = maybe_support.map(|support| support.total).unwrap_or_default(); + // set the backing value and elected flag if the candidate is among the winners. + let who = c; + let maybe_support = supports.get(&who); + let elected = maybe_support.is_some(); + let backed_stake = maybe_support.map(|support| support.total).unwrap_or_default(); - Candidate { who, elected, backed_stake, ..Default::default() }.to_ptr() - }).collect::>(); + Candidate { who, elected, backed_stake, ..Default::default() }.to_ptr() + }) + .collect::>(); // collect all voters into a unified Vec. - let voters = all_voters.into_iter().map(|(v, w, ts)| { - let mut edges: Vec> = Vec::with_capacity(ts.len()); - for t in ts { - if edges.iter().any(|e| e.who == t) { - // duplicate edge. - continue; - } - - if let Some(idx) = candidates_index.get(&t) { - // if this edge is among the assignments, set the weight as well. - let weight = assignment_map - .get(&v) - .and_then(|d| d.iter().find_map(|(x, y)| if x == &t { Some(y) } else { None })) - .cloned() - .unwrap_or_default(); - edges.push(Edge { - who: t, - candidate: Rc::clone(&candidates[*idx]), - weight, - ..Default::default() - }); + let voters = all_voters + .into_iter() + .map(|(v, w, ts)| { + let mut edges: Vec> = Vec::with_capacity(ts.len()); + for t in ts { + if edges.iter().any(|e| e.who == t) { + // duplicate edge. + continue + } + + if let Some(idx) = candidates_index.get(&t) { + // if this edge is among the assignments, set the weight as well. 
+ let weight = assignment_map + .get(&v) + .and_then(|d| { + d.iter().find_map(|(x, y)| if x == &t { Some(y) } else { None }) + }) + .cloned() + .unwrap_or_default(); + edges.push(Edge { + who: t, + candidate: Rc::clone(&candidates[*idx]), + weight, + ..Default::default() + }); + } } - } - let who = v; - let budget: ExtendedBalance = w.into(); - Voter { who, budget, edges, ..Default::default() } - }).collect::>(); + let who = v; + let budget: ExtendedBalance = w.into(); + Voter { who, budget, edges, ..Default::default() } + }) + .collect::>(); (candidates, voters) } @@ -345,7 +347,6 @@ fn pre_score( .fold(Zero::zero(), |acc: ExtendedBalance, voter| acc.saturating_add(slack(voter, t))) } - /// The slack of a voter at a given state. /// /// The slack of each voter, with threshold `t` is the total amount of stake that this voter can @@ -363,8 +364,7 @@ fn slack(voter: &Voter, t: Threshold) -> Exte let candidate = edge.candidate.borrow(); if candidate.elected { let extra = - Perbill::one().min(Perbill::from_rational(t, candidate.backed_stake)) - * edge.weight; + Perbill::one().min(Perbill::from_rational(t, candidate.backed_stake)) * edge.weight; acc.saturating_add(extra) } else { // No slack generated here. @@ -383,13 +383,22 @@ mod tests { fn setup_voter(who: u32, votes: Vec<(u32, u128, bool)>) -> Voter { let mut voter = Voter::new(who); let mut budget = 0u128; - let candidates = votes.into_iter().map(|(t, w, e)| { - budget += w; - Candidate { who: t, elected: e, backed_stake: w, ..Default::default() } - }).collect::>(); - let edges = candidates.into_iter().map(|c| - Edge { who: c.who, weight: c.backed_stake, candidate: c.to_ptr(), ..Default::default() } - ).collect::>(); + let candidates = votes + .into_iter() + .map(|(t, w, e)| { + budget += w; + Candidate { who: t, elected: e, backed_stake: w, ..Default::default() } + }) + .collect::>(); + let edges = candidates + .into_iter() + .map(|c| Edge { + who: c.who, + weight: c.backed_stake, + candidate: c.to_ptr(), + ..Default::default() + }) + .collect::>(); voter.edges = edges; voter.budget = budget; voter @@ -412,7 +421,6 @@ mod tests { assert_eq!(slack(&voter, 17), 3); assert_eq!(slack(&voter, 10), 10); assert_eq!(slack(&voter, 5), 20); - } #[test] @@ -440,15 +448,11 @@ mod tests { ]; // tuples in voters vector are (AccountId, Balance) let supports: Supports = vec![ - (20, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), - (40, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), ]; - let (candidates, voters) = prepare_pjr_input( - &supports, - all_candidates, - all_voters, - ); + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); // elected flag and backing must be set correctly assert_eq!( @@ -467,7 +471,8 @@ mod tests { v.who, v.budget, v.edges.iter().map(|e| (e.who, e.weight)).collect::>(), - )).collect::>(), + )) + .collect::>(), vec![ (1, 10, vec![(10, 0), (20, 5), (30, 0), (40, 5)]), (2, 20, vec![(10, 0), (20, 10), (30, 0), (40, 10)]), @@ -498,15 +503,11 @@ mod tests { ]; // tuples in voters vector are (AccountId, Balance) let supports: Supports = vec![ - (20, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), - (40, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), ]; - let (candidates, voters) = prepare_pjr_input( - &supports, - all_candidates, 
- all_voters, - ); + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); find_threshold_phase_change_for_scenario(candidates, voters); } @@ -521,15 +522,11 @@ mod tests { ]; // tuples in voters vector are (AccountId, Balance) let supports: Supports = vec![ - (20, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), - (40, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), ]; - let (candidates, voters) = prepare_pjr_input( - &supports, - all_candidates, - all_voters, - ); + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); find_threshold_phase_change_for_scenario(candidates, voters); } @@ -544,22 +541,18 @@ mod tests { ]; // tuples in voters vector are (AccountId, Balance) let supports: Supports = vec![ - (20, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), - (40, Support { total: 15, voters: vec![(1, 5), (2, 10)]}), + (20, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), + (40, Support { total: 15, voters: vec![(1, 5), (2, 10)] }), ]; - let (candidates, voters) = prepare_pjr_input( - &supports, - all_candidates, - all_voters, - ); + let (candidates, voters) = prepare_pjr_input(&supports, all_candidates, all_voters); find_threshold_phase_change_for_scenario(candidates, voters); } fn find_threshold_phase_change_for_scenario( candidates: Vec>, - voters: Vec> + voters: Vec>, ) -> Threshold { let mut threshold = 1; let mut prev_threshold = 0; @@ -567,7 +560,9 @@ mod tests { // find the binary range containing the threshold beyond which the PJR check succeeds while pjr_check_core(&candidates, &voters, threshold).is_err() { prev_threshold = threshold; - threshold = threshold.checked_mul(2).expect("pjr check must fail before we run out of capacity in u128"); + threshold = threshold + .checked_mul(2) + .expect("pjr check must fail before we run out of capacity in u128"); } // now binary search within that range to find the phase threshold @@ -595,7 +590,7 @@ mod tests { unexpected_successes.push(t); } } - for t in high_bound..(high_bound*2) { + for t in high_bound..(high_bound * 2) { if pjr_check_core(&candidates, &voters, t).is_err() { unexpected_failures.push(t); } diff --git a/substrate/primitives/npos-elections/src/reduce.rs b/substrate/primitives/npos-elections/src/reduce.rs index a34f1612ca1a5eea232e38731332cfd017b13528..4290743832a57c5d5d1feac32eebba28decbabf1 100644 --- a/substrate/primitives/npos-elections/src/reduce.rs +++ b/substrate/primitives/npos-elections/src/reduce.rs @@ -47,13 +47,15 @@ //! //! 1. -use crate::node::{Node, NodeId, NodeRef, NodeRole}; -use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; +use crate::{ + node::{Node, NodeId, NodeRef, NodeRole}, + ExtendedBalance, IdentifierT, StakedAssignment, +}; use sp_arithmetic::traits::{Bounded, Zero}; use sp_std::{ collections::btree_map::{BTreeMap, Entry::*}, - vec, prelude::*, + vec, }; /// Map type used for reduce_4. Can be easily swapped with HashMap. 
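(Editorial note — the next hunk touches the private `combinations_2` helper but, as a diff, shows only its head: the early return for fewer than two elements and the `n * (n - 1) / 2` capacity pre-allocation. As a hedged reconstruction for readers, the sketch below fills in the obvious pair-enumeration loop; the loop body is an assumption inferred from that signature and capacity, not copied from the crate.)

/// All unordered pairs of `input`, `n * (n - 1) / 2` of them.
fn combinations_2<T: Clone>(input: &[T]) -> Vec<(T, T)> {
    let n = input.len();
    if n < 2 {
        return Default::default()
    }
    let mut comb = Vec::with_capacity(n * (n - 1) / 2);
    // Pair each element with every later element, exactly once.
    for i in 0..n {
        for j in i + 1..n {
            comb.push((input[i].clone(), input[j].clone()));
        }
    }
    comb
}

fn main() {
    // Three inputs yield 3 * 2 / 2 = 3 pairs.
    assert_eq!(combinations_2(&[1, 2, 3]), vec![(1, 2), (1, 3), (2, 3)]);
}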
@@ -63,7 +65,7 @@ type Map = BTreeMap<(A, A), A>; fn combinations_2(input: &[T]) -> Vec<(T, T)> { let n = input.len(); if n < 2 { - return Default::default(); + return Default::default() } let mut comb = Vec::with_capacity(n * (n - 1) / 2); @@ -126,7 +128,7 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { match combination_map.entry((v1.clone(), v2.clone())) { Vacant(entry) => { entry.insert(who.clone()); - } + }, Occupied(mut entry) => { let other_who = entry.get_mut(); @@ -141,29 +143,30 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { .filter(|(t, _)| *t == v1 || *t == v2) .count() != 2 { - continue; + continue } // check if other_who voted for the same pair v1, v2. let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); if maybe_other_assignments.is_none() { - continue; + continue } let other_assignment = maybe_other_assignments.expect("value is checked to be 'Some'"); // Collect potential cycle votes - let mut other_cycle_votes = other_assignment - .distribution - .iter() - .filter_map(|(t, w)| { - if *t == v1 || *t == v2 { - Some((t.clone(), *w)) - } else { - None - } - }) - .collect::>(); + let mut other_cycle_votes = + other_assignment + .distribution + .iter() + .filter_map(|(t, w)| { + if *t == v1 || *t == v2 { + Some((t.clone(), *w)) + } else { + None + } + }) + .collect::>(); let other_votes_count = other_cycle_votes.len(); @@ -175,21 +178,18 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { if other_votes_count < 2 { // This is not a cycle. Replace and continue. *other_who = who.clone(); - continue; + continue } else if other_votes_count == 2 { // This is a cycle. let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); - assignments[assignment_index] - .distribution - .iter() - .for_each(|(t, w)| { - if *t == v1 || *t == v2 { - who_cycle_votes.push((t.clone(), *w)); - } - }); + assignments[assignment_index].distribution.iter().for_each(|(t, w)| { + if *t == v1 || *t == v2 { + who_cycle_votes.push((t.clone(), *w)); + } + }); if who_cycle_votes.len() != 2 { - continue; + continue } // Align the targets similarly. This helps with the circulation below. @@ -240,53 +240,39 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { // apply changes let mut remove_indices: Vec = Vec::with_capacity(1); increase_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; + let voter = if i < 2 { who.clone() } else { other_who.clone() }; // Note: so this is pretty ambiguous. We should only look for one // assignment that meets this criteria and if we find multiple then that // is a corrupt input. Same goes for the next block. 
- assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_add(min_value); - ass.distribution[idx].1 = next_value; - }); - }); + assignments.iter_mut().filter(|a| a.who == voter).for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_add(min_value); + ass.distribution[idx].1 = next_value; + }); + }); }); decrease_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; - assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_sub(min_value); - if next_value.is_zero() { - ass.distribution.remove(idx); - remove_indices.push(i); - num_changed += 1; - } else { - ass.distribution[idx].1 = next_value; - } - }); - }); + let voter = if i < 2 { who.clone() } else { other_who.clone() }; + assignments.iter_mut().filter(|a| a.who == voter).for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_sub(min_value); + if next_value.is_zero() { + ass.distribution.remove(idx); + remove_indices.push(i); + num_changed += 1; + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); }); // remove either one of them. @@ -297,21 +283,21 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { match (who_removed, other_removed) { (false, true) => { *other_who = who.clone(); - } + }, (true, false) => { // nothing, other_who can stay there. - } + }, (true, true) => { // remove and don't replace entry.remove(); - } + }, (false, false) => { // Neither of the edges was removed? impossible. panic!("Duplicate voter (or other corrupt input)."); - } + }, } } - } + }, } } } @@ -350,7 +336,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let maybe_dist = assignments[assignment_index].distribution.get(dist_index); if maybe_dist.is_none() { // The rest of this loop is moot. - break; + break } let (target, _) = maybe_dist.expect("Value checked to be some").clone(); @@ -377,19 +363,19 @@ fn reduce_all(assignments: &mut Vec>) -> u32 (false, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; - } + continue + }, (false, true) => { Node::set_parent_of(&voter_node, &target_node); dist_index += 1; - continue; - } + continue + }, (true, false) => { Node::set_parent_of(&target_node, &voter_node); dist_index += 1; - continue; - } - (true, true) => { /* don't continue and execute the rest */ } + continue + }, + (true, true) => { /* don't continue and execute the rest */ }, }; let (voter_root, voter_root_path) = Node::root(&voter_node); @@ -405,10 +391,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 // because roots are the same. 
#[cfg(feature = "std")] - debug_assert_eq!( - target_root_path.last().unwrap(), - voter_root_path.last().unwrap() - ); + debug_assert_eq!(target_root_path.last().unwrap(), voter_root_path.last().unwrap()); debug_assert!(common_count > 0); // cycle part of each path will be `path[path.len() - common_count - 1 : 0]` @@ -602,7 +585,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = voter_root_path[i].clone().borrow().id.who.clone(); let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); } @@ -613,7 +596,7 @@ fn reduce_all(assignments: &mut Vec>) -> u32 let current = target_root_path[i].clone().borrow().id.who.clone(); let next = target_root_path[i + 1].clone().borrow().id.who.clone(); if min_edge.contains(¤t) && min_edge.contains(&next) { - break; + break } Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); } @@ -663,9 +646,9 @@ mod tests { #[test] fn merging_works() { - // D <-- A <-- B <-- C + // D <-- A <-- B <-- C // - // F <-- E + // F <-- E let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); @@ -682,17 +665,17 @@ mod tests { let path2 = vec![e.clone(), f.clone()]; merge(path1, path2); - // D <-- A <-- B <-- C - // | - // F --> E --> --> + // D <-- A <-- B <-- C + // | + // F --> E --> --> assert_eq!(e.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c } #[test] fn merge_with_len_one() { - // D <-- A <-- B <-- C + // D <-- A <-- B <-- C // - // F <-- E + // F <-- E let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); @@ -707,9 +690,9 @@ mod tests { let path2 = vec![f.clone()]; merge(path1, path2); - // D <-- A <-- B <-- C - // | - // F --> --> + // D <-- A <-- B <-- C + // | + // F --> --> assert_eq!(f.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c } @@ -718,14 +701,8 @@ mod tests { use super::*; let assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 25), (20, 75)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 50), (20, 50)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 25), (20, 75)] }, + StakedAssignment { who: 2, distribution: vec![(10, 50), (20, 50)] }, ]; let mut new_assignments = assignments.clone(); @@ -735,14 +712,8 @@ mod tests { assert_eq!( new_assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(20, 100),], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 75), (20, 25),], - }, + StakedAssignment { who: 1, distribution: vec![(20, 100),] }, + StakedAssignment { who: 2, distribution: vec![(10, 75), (20, 25),] }, ], ); } @@ -750,26 +721,11 @@ mod tests { #[test] fn basic_reduce_all_cycles_works() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, 
distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), (30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, ]; assert_eq!(3, reduce_all(&mut assignments)); @@ -777,26 +733,11 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, ], ) } @@ -804,26 +745,11 @@ mod tests { #[test] fn basic_reduce_works() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), (30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, ]; assert_eq!(3, reduce(&mut assignments)); @@ -831,26 +757,11 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, ], ) } @@ -858,35 +769,14 @@ mod tests { #[test] fn should_deal_with_self_vote() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 3, distribution: vec![(20, 15), (40, 15)] }, + StakedAssignment { who: 4, distribution: vec![(20, 10), 
(30, 10), (40, 20)] }, + StakedAssignment { who: 5, distribution: vec![(20, 20), (30, 10), (40, 20)] }, // self vote from 10 and 20 to itself. - StakedAssignment { - who: 10, - distribution: vec![(10, 100)], - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)], - }, + StakedAssignment { who: 10, distribution: vec![(10, 100)] }, + StakedAssignment { who: 20, distribution: vec![(20, 200)] }, ]; assert_eq!(3, reduce(&mut assignments)); @@ -894,35 +784,14 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10),] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 5),] }, + StakedAssignment { who: 3, distribution: vec![(20, 30),] }, + StakedAssignment { who: 4, distribution: vec![(40, 40),] }, + StakedAssignment { who: 5, distribution: vec![(20, 15), (30, 20), (40, 15),] }, // should stay untouched. - StakedAssignment { - who: 10, - distribution: vec![(10, 100)] - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)] - }, + StakedAssignment { who: 10, distribution: vec![(10, 100)] }, + StakedAssignment { who: 20, distribution: vec![(20, 200)] }, ], ) } @@ -930,55 +799,23 @@ mod tests { #[test] fn reduce_3_common_votes_same_weight() { let mut assignments = vec![ - StakedAssignment { - who: 4, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - StakedAssignment { - who: 5, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - ]; + StakedAssignment { + who: 4, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + StakedAssignment { + who: 5, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + ]; reduce_4(&mut assignments); assert_eq!( assignments, vec![ - StakedAssignment { - who: 4, - distribution: vec![(1000000, 200,), (1000004, 100,),], - }, - StakedAssignment { - who: 5, - distribution: vec![(1000002, 200,), (1000004, 100,),], - }, + StakedAssignment { who: 4, distribution: vec![(1000000, 200,), (1000004, 100,),] }, + StakedAssignment { who: 5, distribution: vec![(1000002, 200,), (1000004, 100,),] }, ], ) } @@ -987,18 +824,9 @@ mod tests { #[should_panic] fn reduce_panics_on_duplicate_voter() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10), (20, 10)], - }, - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 15)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 10), (20, 10)] }, + StakedAssignment { who: 1, distribution: vec![(10, 15), (20, 5)] }, + StakedAssignment { who: 2, distribution: vec![(10, 15), (20, 15)] }, ]; reduce(&mut assignments); @@ -1007,10 +835,7 @@ mod tests { #[test] fn should_deal_with_duplicates_target() { let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, + StakedAssignment { who: 1, distribution: vec![(10, 15), (20, 5)] }, StakedAssignment { who: 2, distribution: vec![ @@ -1029,10 +854,7 @@ mod tests { assert_eq!( assignments, vec![ - StakedAssignment { 
- who: 1, - distribution: vec![(10, 20),], - }, + StakedAssignment { who: 1, distribution: vec![(10, 20),] }, StakedAssignment { who: 2, distribution: vec![ diff --git a/substrate/primitives/npos-elections/src/tests.rs b/substrate/primitives/npos-elections/src/tests.rs index 8cadff949b6f2207a04bff40f3ed608e34e8e615..ee67095307c2d8fa3bbe8ad5e8cbd41ed096817d 100644 --- a/substrate/primitives/npos-elections/src/tests.rs +++ b/substrate/primitives/npos-elections/src/tests.rs @@ -19,22 +19,18 @@ use crate::{ balancing, helpers::*, is_score_better, mock::*, seq_phragmen, seq_phragmen_core, setup_inputs, - to_support_map, to_supports, Assignment, CompactSolution, ElectionResult, ExtendedBalance, - IndexAssignment, StakedAssignment, Support, Voter, EvaluateSupport, + to_support_map, to_supports, Assignment, CompactSolution, ElectionResult, EvaluateSupport, + ExtendedBalance, IndexAssignment, StakedAssignment, Support, Voter, }; use rand::{self, SeedableRng}; use sp_arithmetic::{PerU16, Perbill, Percent, Permill}; -use substrate_test_utils::assert_eq_uvec; use std::convert::TryInto; +use substrate_test_utils::assert_eq_uvec; #[test] fn float_phragmen_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (1, 0), (2, 0), (3, 0)]); let mut phragmen_result = elect_float(2, candidates, voters, &stake_of).unwrap(); let winners = phragmen_result.clone().winners; @@ -43,11 +39,7 @@ fn float_phragmen_poc_works() { assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); assert_eq_uvec!( assignments, - vec![ - (10, vec![(2, 1.0)]), - (20, vec![(3, 1.0)]), - (30, vec![(2, 0.5), (3, 0.5)]), - ] + vec![(10, vec![(2, 1.0)]), (20, vec![(3, 1.0)]), (30, vec![(2, 0.5), (3, 0.5)]),] ); let mut support_map = build_support_map_float(&mut phragmen_result, &stake_of); @@ -76,11 +68,7 @@ fn float_phragmen_poc_works() { #[test] fn phragmen_core_test_without_edges() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![]), - (20, 20, vec![]), - (30, 30, vec![]), - ]; + let voters = vec![(10, 10, vec![]), (20, 20, vec![]), (30, 30, vec![])]; let (candidates, voters) = setup_inputs(candidates, voters); @@ -104,23 +92,16 @@ fn phragmen_core_test_without_edges() { c_ptr.borrow().elected, c_ptr.borrow().round, c_ptr.borrow().backed_stake, - )).collect::>(), - vec![ - (1, false, 0, 0), - (2, false, 0, 0), - (3, false, 0, 0), - ] + )) + .collect::>(), + vec![(1, false, 0, 0), (2, false, 0, 0), (3, false, 0, 0),] ); } #[test] fn phragmen_core_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 10, vec![1, 2]), - (20, 20, vec![1, 3]), - (30, 30, vec![2, 3]), - ]; + let voters = vec![(10, 10, vec![1, 2]), (20, 20, vec![1, 3]), (30, 30, vec![2, 3])]; let (candidates, voters) = setup_inputs(candidates, voters); let (candidates, voters) = seq_phragmen_core(2, candidates, voters).unwrap(); @@ -134,11 +115,7 @@ fn phragmen_core_poc_works() { (v.edges.iter().map(|e| (e.who, e.weight)).collect::>()), )) .collect::>(), - vec![ - (10, 10, vec![(2, 10)]), - (20, 20, vec![(3, 20)]), - (30, 30, vec![(2, 15), (3, 15)]), - ] + vec![(10, 10, vec![(2, 10)]), (20, 20, vec![(3, 20)]), (30, 30, vec![(2, 15), (3, 15)]),] ); assert_eq!( @@ -149,12 +126,9 @@ fn phragmen_core_poc_works() { c_ptr.borrow().elected, c_ptr.borrow().round, c_ptr.borrow().backed_stake, - )).collect::>(), - vec![ - (1, false, 0, 0), - 
(2, true, 1, 25), - (3, true, 0, 35), - ] + )) + .collect::>(), + vec![(1, false, 0, 0), (2, true, 1, 25), (3, true, 0, 35),] ); } @@ -203,7 +177,8 @@ fn balancing_core_works() { c_ptr.borrow().elected, c_ptr.borrow().round, c_ptr.borrow().backed_stake, - )).collect::>(), + )) + .collect::>(), vec![ (1, true, 1, 37), (2, true, 2, 38), @@ -220,40 +195,30 @@ fn voter_normalize_ops_works() { use sp_std::{cell::RefCell, rc::Rc}; // normalize { - let c1 = Candidate { who: 10, elected: false ,..Default::default() }; - let c2 = Candidate { who: 20, elected: false ,..Default::default() }; - let c3 = Candidate { who: 30, elected: false ,..Default::default() }; + let c1 = Candidate { who: 10, elected: false, ..Default::default() }; + let c2 = Candidate { who: 20, elected: false, ..Default::default() }; + let c3 = Candidate { who: 30, elected: false, ..Default::default() }; let e1 = Edge { candidate: Rc::new(RefCell::new(c1)), weight: 30, ..Default::default() }; let e2 = Edge { candidate: Rc::new(RefCell::new(c2)), weight: 33, ..Default::default() }; let e3 = Edge { candidate: Rc::new(RefCell::new(c3)), weight: 30, ..Default::default() }; - let mut v = Voter { - who: 1, - budget: 100, - edges: vec![e1, e2, e3], - ..Default::default() - }; + let mut v = Voter { who: 1, budget: 100, edges: vec![e1, e2, e3], ..Default::default() }; v.try_normalize().unwrap(); assert_eq!(v.edges.iter().map(|e| e.weight).collect::>(), vec![34, 33, 33]); } // // normalize_elected { - let c1 = Candidate { who: 10, elected: false ,..Default::default() }; - let c2 = Candidate { who: 20, elected: true ,..Default::default() }; - let c3 = Candidate { who: 30, elected: true ,..Default::default() }; + let c1 = Candidate { who: 10, elected: false, ..Default::default() }; + let c2 = Candidate { who: 20, elected: true, ..Default::default() }; + let c3 = Candidate { who: 30, elected: true, ..Default::default() }; let e1 = Edge { candidate: Rc::new(RefCell::new(c1)), weight: 30, ..Default::default() }; let e2 = Edge { candidate: Rc::new(RefCell::new(c2)), weight: 33, ..Default::default() }; let e3 = Edge { candidate: Rc::new(RefCell::new(c3)), weight: 30, ..Default::default() }; - let mut v = Voter { - who: 1, - budget: 100, - edges: vec![e1, e2, e3], - ..Default::default() - }; + let mut v = Voter { who: 1, budget: 100, edges: vec![e1, e2, e3], ..Default::default() }; v.try_normalize_elected().unwrap(); assert_eq!(v.edges.iter().map(|e| e.weight).collect::>(), vec![30, 34, 66]); @@ -263,37 +228,31 @@ fn voter_normalize_ops_works() { #[test] fn phragmen_poc_works() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 25), (3, 35)]); assert_eq_uvec!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::from_percent(100))], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::from_percent(100))], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::from_percent(100))] }, + Assignment { who: 20, distribution: vec![(3, 
Perbill::from_percent(100))] }, Assignment { who: 30, distribution: vec![ - (2, Perbill::from_percent(100/2)), - (3, Perbill::from_percent(100/2)), + (2, Perbill::from_percent(100 / 2)), + (3, Perbill::from_percent(100 / 2)), ], }, ] @@ -306,21 +265,9 @@ fn phragmen_poc_works() { assert_eq_uvec!( staked, vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 15), - (3, 15), - ], - }, + StakedAssignment { who: 10u64, distribution: vec![(2, 10)] }, + StakedAssignment { who: 20, distribution: vec![(3, 20)] }, + StakedAssignment { who: 30, distribution: vec![(2, 15), (3, 15),] }, ] ); @@ -337,32 +284,26 @@ fn phragmen_poc_works() { #[test] fn phragmen_poc_works_with_balancing() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), Some((4, 0)), - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 30), (3, 30)]); assert_eq_uvec!( assignments, vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::from_percent(100))], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::from_percent(100))], - }, + Assignment { who: 10u64, distribution: vec![(2, Perbill::from_percent(100))] }, + Assignment { who: 20, distribution: vec![(3, Perbill::from_percent(100))] }, Assignment { who: 30, distribution: vec![ @@ -380,21 +321,9 @@ fn phragmen_poc_works_with_balancing() { assert_eq_uvec!( staked, vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 20), - (3, 10), - ], - }, + StakedAssignment { who: 10u64, distribution: vec![(2, 10)] }, + StakedAssignment { who: 20, distribution: vec![(3, 20)] }, + StakedAssignment { who: 30, distribution: vec![(2, 20), (3, 10),] }, ] ); @@ -408,22 +337,12 @@ fn phragmen_poc_works_with_balancing() { ); } - #[test] fn phragmen_poc_2_works() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (40, 1000), - (2, 500), - (4, 500), - ]); + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = + create_stake_of(&[(10, 1000), (20, 1000), (30, 1000), (40, 1000), (2, 500), (4, 500)]); run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); @@ -434,17 +353,8 @@ fn phragmen_poc_2_works() { #[test] fn phragmen_poc_3_works() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (2, 50), - (4, 1000), - ]); + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = create_stake_of(&[(10, 1000), (20, 1000), (30, 1000), (2, 50), (4, 1000)]); run_and_compare::(candidates.clone(), 
voters.clone(), &stake_of, 2); run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2); @@ -473,7 +383,8 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(1, 18446744073709551614u128), (5, 18446744073709551613u128)]); assert_eq!(assignments.len(), 2); @@ -483,17 +394,14 @@ fn phragmen_accuracy_on_large_scale_only_candidates() { #[test] fn phragmen_accuracy_on_large_scale_voters_and_candidates() { let candidates = vec![1, 2, 3, 4, 5]; - let mut voters = vec![ - (13, vec![1, 3, 5]), - (14, vec![2, 4]), - ]; + let mut voters = vec![(13, vec![1, 3, 5]), (14, vec![2, 4])]; voters.extend(auto_generate_self_voters(&candidates)); let stake_of = create_stake_of(&[ - (1, (u64::MAX - 1).into()), - (2, (u64::MAX - 4).into()), - (3, (u64::MAX - 5).into()), - (4, (u64::MAX - 3).into()), - (5, (u64::MAX - 2).into()), + (1, (u64::MAX - 1).into()), + (2, (u64::MAX - 4).into()), + (3, (u64::MAX - 5).into()), + (4, (u64::MAX - 3).into()), + (5, (u64::MAX - 2).into()), (13, (u64::MAX - 10).into()), (14, u64::MAX.into()), ]); @@ -501,31 +409,23 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 36893488147419103226u128), (1, 36893488147419103219u128)]); assert_eq!( assignments, vec![ - Assignment { - who: 13u64, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 14, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 1, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, + Assignment { who: 13u64, distribution: vec![(1, Perbill::one())] }, + Assignment { who: 14, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 1, distribution: vec![(1, Perbill::one())] }, + Assignment { who: 2, distribution: vec![(2, Perbill::one())] }, ] ); @@ -536,19 +436,18 @@ fn phragmen_accuracy_on_large_scale_voters_and_candidates() { fn phragmen_accuracy_on_small_scale_self_vote() { let candidates = vec![40, 10, 20, 30]; let voters = auto_generate_self_voters(&candidates); - let stake_of = create_stake_of(&[ - (40, 0), - (10, 1), - (20, 2), - (30, 1), - ]); + let stake_of = create_stake_of(&[(40, 0), (10, 1), (20, 2), (30, 1)]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); check_assignments_sum(&assignments); @@ -557,12 +456,7 @@ fn phragmen_accuracy_on_small_scale_self_vote() { #[test] fn phragmen_accuracy_on_small_scale_no_self_vote() { let candidates = vec![40, 10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - (3, vec![30]), - (4, vec![40]), - ]; + let voters = vec![(1, vec![10]), (2, vec![20]), (3, vec![30]), (4, vec![40])]; let stake_of = create_stake_of(&[ (40, 1000), // don't care (10, 1000), // don't care @@ -577,27 +471,28 @@ fn 
phragmen_accuracy_on_small_scale_no_self_vote() { let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); check_assignments_sum(&assignments); - } #[test] fn phragmen_large_scale_test() { - let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]; - let mut voters = vec![ - (50, vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]), - ]; + let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]; + let mut voters = vec![(50, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24])]; voters.extend(auto_generate_self_voters(&candidates)); let stake_of = create_stake_of(&[ - (2, 1), - (4, 100), - (6, 1000000), - (8, 100000000001000), + (2, 1), + (4, 100), + (6, 1000000), + (8, 100000000001000), (10, 100000000002000), (12, 100000000003000), (14, 400000000000000), @@ -612,9 +507,13 @@ fn phragmen_large_scale_test() { let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(to_without_backing(winners.clone()), vec![24, 22]); check_assignments_sum(&assignments); @@ -629,18 +528,19 @@ fn phragmen_large_scale_test_2() { let mut voters = vec![(50, vec![2, 4])]; voters.extend(auto_generate_self_voters(&candidates)); - let stake_of = create_stake_of(&[ - (2, c_budget.into()), - (4, c_budget.into()), - (50, nom_budget.into()), - ]); + let stake_of = + create_stake_of(&[(2, c_budget.into()), (4, c_budget.into()), (50, nom_budget.into())]); let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq_uvec!(winners, vec![(2, 500000000005000000u128), (4, 500000000003000000)]); @@ -654,14 +554,8 @@ fn phragmen_large_scale_test_2() { (4, Perbill::from_parts(500000000)), ], }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 4, - distribution: vec![(4, Perbill::one())], - }, + Assignment { who: 2, distribution: vec![(2, Perbill::one())] }, + Assignment { who: 4, distribution: vec![(4, Perbill::one())] }, ], ); @@ -688,7 +582,6 @@ fn phragmen_linear_equalize() { (51, 1000), (61, 1000), (71, 1000), - (2, 2000), (4, 1000), (6, 1000), @@ -704,58 +597,48 @@ fn phragmen_linear_equalize() { #[test] fn elect_has_no_entry_barrier() { let candidates = vec![10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - ]; - let stake_of = create_stake_of(&[ - (1, 10), - (2, 10), - ]); + let voters = vec![(1, vec![10]), (2, vec![20])]; + let stake_of = create_stake_of(&[(1, 10), (2, 10)]); let ElectionResult { winners, assignments: _ } = seq_phragmen::<_, Perbill>( 3, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); // 30 is elected with 
stake 0. The caller is responsible for stripping this. - assert_eq_uvec!(winners, vec![ - (10, 10), - (20, 10), - (30, 0), - ]); + assert_eq_uvec!(winners, vec![(10, 10), (20, 10), (30, 0),]); } #[test] fn phragmen_self_votes_should_be_kept() { let candidates = vec![5, 10, 20, 30]; - let voters = vec![ - (5, vec![5]), - (10, vec![10]), - (20, vec![20]), - (1, vec![10, 20]) - ]; - let stake_of = create_stake_of(&[ - (5, 5), - (10, 10), - (20, 20), - (1, 8), - ]); + let voters = vec![(5, vec![5]), (10, vec![10]), (20, vec![20]), (1, vec![10, 20])]; + let stake_of = create_stake_of(&[(5, 5), (10, 10), (20, 20), (1, 8)]); let result = seq_phragmen::<_, Perbill>( 2, candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), None, - ).unwrap(); + ) + .unwrap(); assert_eq!(result.winners, vec![(20, 24), (10, 14)]); assert_eq_uvec!( result.assignments, vec![ - Assignment { who: 1, distribution: vec![ + Assignment { + who: 1, + distribution: vec![ (10, Perbill::from_percent(50)), (20, Perbill::from_percent(50)), ] @@ -783,18 +666,10 @@ fn phragmen_self_votes_should_be_kept() { #[test] fn duplicate_target_is_ignored() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 100, vec![1, 1, 2, 3]), - (20, 100, vec![2, 3]), - (30, 50, vec![1, 1, 2]), - ]; + let voters = vec![(10, 100, vec![1, 1, 2, 3]), (20, 100, vec![2, 3]), (30, 50, vec![1, 1, 2])]; - let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( - 2, - candidates, - voters, - None, - ).unwrap(); + let ElectionResult { winners, assignments } = + seq_phragmen::<_, Perbill>(2, candidates, voters, None).unwrap(); let winners = to_without_backing(winners); assert_eq!(winners, vec![(2), (3)]); @@ -803,28 +678,17 @@ fn duplicate_target_is_ignored() { .into_iter() .map(|x| (x.who, x.distribution.into_iter().map(|(w, _)| w).collect::>())) .collect::>(), - vec![ - (10, vec![2, 3]), - (20, vec![2, 3]), - (30, vec![2]), - ], + vec![(10, vec![2, 3]), (20, vec![2, 3]), (30, vec![2]),], ); } #[test] fn duplicate_target_is_ignored_when_winner() { let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, 100, vec![1, 1, 2, 3]), - (20, 100, vec![1, 2]), - ]; + let voters = vec![(10, 100, vec![1, 1, 2, 3]), (20, 100, vec![1, 2])]; - let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( - 2, - candidates, - voters, - None, - ).unwrap(); + let ElectionResult { winners, assignments } = + seq_phragmen::<_, Perbill>(2, candidates, voters, None).unwrap(); let winners = to_without_backing(winners); assert_eq!(winners, vec![1, 2]); @@ -833,10 +697,7 @@ fn duplicate_target_is_ignored_when_winner() { .into_iter() .map(|x| (x.who, x.distribution.into_iter().map(|(w, _)| w).collect::>())) .collect::>(), - vec![ - (10, vec![1, 2]), - (20, vec![1, 2]), - ], + vec![(10, vec![1, 2]), (20, vec![1, 2]),], ); } @@ -846,10 +707,7 @@ fn support_map_and_vec_can_be_evaluated() { let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); - let ElectionResult { - winners, - assignments, - } = seq_phragmen::<_, Perbill>( + let ElectionResult { winners, assignments } = seq_phragmen::<_, Perbill>( 2, candidates, voters @@ -874,10 +732,7 @@ mod assignment_convert_normalize { fn assignment_convert_works() { let staked = StakedAssignment { who: 1 as AccountId, - distribution: vec![ - (20, 100 as ExtendedBalance), - (30, 25), - 
], + distribution: vec![(20, 100 as ExtendedBalance), (30, 25)], }; let assignment = staked.clone().into_assignment(); @@ -892,10 +747,7 @@ mod assignment_convert_normalize { } ); - assert_eq!( - assignment.into_staked(125), - staked, - ); + assert_eq!(assignment.into_staked(125), staked,); } #[test] @@ -903,11 +755,9 @@ mod assignment_convert_normalize { assert_eq!( Assignment { who: 1, - distribution: vec![ - (2, Perbill::from_percent(33)), - (3, Perbill::from_percent(66)), - ] - }.into_staked(100), + distribution: vec![(2, Perbill::from_percent(33)), (3, Perbill::from_percent(66)),] + } + .into_staked(100), StakedAssignment { who: 1, distribution: vec![ @@ -926,7 +776,8 @@ mod assignment_convert_normalize { (3, 333_333_333_333_333), (4, 666_666_666_666_333), ], - }.into_assignment(), + } + .into_assignment(), Assignment { who: 1, distribution: vec![ @@ -947,7 +798,7 @@ mod assignment_convert_normalize { (2, Perbill::from_parts(330000000)), (3, Perbill::from_parts(660000000)), // sum is not 100%! - ] + ], }; a.try_normalize().unwrap(); assert_eq!( @@ -964,24 +815,9 @@ mod assignment_convert_normalize { #[test] fn staked_assignment_can_normalize() { - let mut a = StakedAssignment { - who: 1, - distribution: vec![ - (2, 33), - (3, 66), - ] - }; + let mut a = StakedAssignment { who: 1, distribution: vec![(2, 33), (3, 66)] }; a.try_normalize(100).unwrap(); - assert_eq!( - a, - StakedAssignment { - who: 1, - distribution: vec![ - (2, 34), - (3, 66), - ] - }, - ); + assert_eq!(a, StakedAssignment { who: 1, distribution: vec![(2, 34), (3, 66),] },); } } @@ -991,28 +827,16 @@ mod score { fn score_comparison_is_lexicographical_no_epsilon() { let epsilon = Perbill::zero(); // only better in the first parameter, worse in the other two ✅ - assert_eq!( - is_score_better([12, 10, 35], [10, 20, 30], epsilon), - true, - ); + assert_eq!(is_score_better([12, 10, 35], [10, 20, 30], epsilon), true,); // worse in the first, better in the other two ❌ - assert_eq!( - is_score_better([9, 30, 10], [10, 20, 30], epsilon), - false, - ); + assert_eq!(is_score_better([9, 30, 10], [10, 20, 30], epsilon), false,); // equal in the first, the second one dictates. - assert_eq!( - is_score_better([10, 25, 40], [10, 20, 30], epsilon), - true, - ); + assert_eq!(is_score_better([10, 25, 40], [10, 20, 30], epsilon), true,); // equal in the first two, the last one dictates. - assert_eq!( - is_score_better([10, 20, 40], [10, 20, 30], epsilon), - false, - ); + assert_eq!(is_score_better([10, 20, 40], [10, 20, 30], epsilon), false,); } #[test] @@ -1021,120 +845,72 @@ mod score { { // no more than 1 percent (10) better in the first param. - assert_eq!( - is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), false,); // now equal, still not better. - assert_eq!( - is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), false,); // now it is. - assert_eq!( - is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), true,); } { // First score is epsilon better, but first score is no longer `ge`. Then this is // still not a good solution. 
- assert_eq!( - is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), - false, - ); + assert_eq!(is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), false,); } { // first score is equal or better, but not epsilon. Then second one is the determinant. - assert_eq!( - is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), - false, - ); - - assert_eq!( - is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), - false, - ); - - assert_eq!( - is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), false,); + + assert_eq!(is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), false,); + + assert_eq!(is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), true,); } { // first score and second are equal or less than epsilon more, third is determinant. - assert_eq!( - is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), - false, - ); - - assert_eq!( - is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), - false, - ); - - assert_eq!( - is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), - true, - ); + assert_eq!(is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), false,); + + assert_eq!(is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), false,); + + assert_eq!(is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), true,); } } #[test] fn score_comparison_large_value() { // some random value taken from eras in kusama. - let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; + let initial = + [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; // this claim is 0.04090% better in the third component. 
It should be accepted as better if // epsilon is smaller than 5/10_0000 - let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; + let claim = + [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational(1u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(1u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational(2u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(2u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational(3u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(3u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational(4u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(4u32, 10_000),), true, ); assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational(5u32, 10_000), - ), + is_score_better(claim.clone(), initial.clone(), Perbill::from_rational(5u32, 10_000),), false, ); } @@ -1223,10 +999,7 @@ mod solution_type { let encoded = compact.encode(); - assert_eq!( - compact, - Decode::decode(&mut &encoded[..]).unwrap(), - ); + assert_eq!(compact, Decode::decode(&mut &encoded[..]).unwrap(),); assert_eq!(compact.voter_count(), 4); assert_eq!(compact.edge_count(), 2 + 4); assert_eq!(compact.unique_targets(), vec![10, 11, 20, 40, 50, 51]); @@ -1240,13 +1013,11 @@ mod solution_type { (2, (0, TestAccuracy::from_percent(80)), 1), (3, (7, TestAccuracy::from_percent(85)), 8), ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes3: vec![( + 4, + [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], + 5, + )], ..Default::default() }; @@ -1256,16 +1027,12 @@ mod solution_type { compact, TestSolutionCompact { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes2: vec![(3, (7, TestAccuracy::from_percent(85)), 8),], + votes3: vec![( + 4, + [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], + 5, + ),], ..Default::default() }, ); @@ -1275,9 +1042,7 @@ mod solution_type { compact, TestSolutionCompact { votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], + votes2: vec![(3, (7, TestAccuracy::from_percent(85)), 8),], ..Default::default() }, ); @@ -1287,9 +1052,7 @@ mod solution_type { compact, TestSolutionCompact { votes1: vec![(0, 2)], - votes2: vec![ - (3, (7, TestAccuracy::from_percent(85)), 8), - ], + votes2: vec![(3, (7, TestAccuracy::from_percent(85)), 8),], ..Default::default() }, ); @@ -1297,13 +1060,7 @@ mod solution_type { #[test] fn basic_from_and_into_compact_works_assignments() { - let voters = vec![ - 2 as AccountId, - 4, - 1, - 5, - 3, - ]; + let voters = vec![2 as AccountId, 4, 1, 5, 3]; let targets = vec![ 10 as AccountId, 11, @@ -1319,17 +1076,14 @@ mod solution_type { let assignments = vec![ Assignment { who: 2 as AccountId, - distribution: vec![(20u64, TestAccuracy::from_percent(100))] - }, - Assignment { - who: 4, - 
distribution: vec![(40, TestAccuracy::from_percent(100))], + distribution: vec![(20u64, TestAccuracy::from_percent(100))], }, + Assignment { who: 4, distribution: vec![(40, TestAccuracy::from_percent(100))] }, Assignment { who: 1, distribution: vec![ (10, TestAccuracy::from_percent(80)), - (11, TestAccuracy::from_percent(20)) + (11, TestAccuracy::from_percent(20)), ], }, Assignment { @@ -1337,7 +1091,7 @@ mod solution_type { distribution: vec![ (50, TestAccuracy::from_percent(85)), (51, TestAccuracy::from_percent(15)), - ] + ], }, Assignment { who: 3, @@ -1356,11 +1110,8 @@ mod solution_type { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = TestSolutionCompact::from_assignment( - &assignments, - voter_index, - target_index, - ).unwrap(); + let compacted = + TestSolutionCompact::from_assignment(&assignments, voter_index, target_index).unwrap(); // basically number of assignments that it is encoding. assert_eq!(compacted.voter_count(), assignments.len()); @@ -1377,21 +1128,16 @@ mod solution_type { (2, (0, TestAccuracy::from_percent(80)), 1), (3, (7, TestAccuracy::from_percent(85)), 8), ], - votes3: vec![ - ( - 4, - [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], - 5, - ), - ], + votes3: vec![( + 4, + [(3, TestAccuracy::from_percent(50)), (4, TestAccuracy::from_percent(25))], + 5, + ),], ..Default::default() } ); - assert_eq!( - compacted.unique_targets(), - vec![0, 1, 2, 3, 4, 5, 6, 7, 8], - ); + assert_eq!(compacted.unique_targets(), vec![0, 1, 2, 3, 4, 5, 6, 7, 8],); let voter_at = |a: u32| -> Option { voters.get(>::try_into(a).unwrap()).cloned() @@ -1400,10 +1146,7 @@ mod solution_type { targets.get(>::try_into(a).unwrap()).cloned() }; - assert_eq!( - compacted.into_assignment(voter_at, target_at).unwrap(), - assignments, - ); + assert_eq!(compacted.into_assignment(voter_at, target_at).unwrap(), assignments,); } #[test] @@ -1413,57 +1156,42 @@ mod solution_type { // we don't really care about voters here so all duplicates. This is not invalid per se. let compact = TestSolutionCompact { votes1: vec![(99, 1), (99, 2)], - votes2: vec![ - (99, (3, ACC.clone()), 7), - (99, (4, ACC.clone()), 8), - ], - votes3: vec![ - (99, [(11, ACC.clone()), (12, ACC.clone())], 13), - ], + votes2: vec![(99, (3, ACC.clone()), 7), (99, (4, ACC.clone()), 8)], + votes3: vec![(99, [(11, ACC.clone()), (12, ACC.clone())], 13)], // ensure the last one is also counted. - votes16: vec![ - ( - 99, - [ - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - (66, ACC.clone()), - ], - 67, - ) - ], + votes16: vec![( + 99, + [ + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + (66, ACC.clone()), + ], + 67, + )], ..Default::default() }; - assert_eq!( - compact.unique_targets(), - vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67] - ); + assert_eq!(compact.unique_targets(), vec![1, 2, 3, 4, 7, 8, 11, 12, 13, 66, 67]); assert_eq!(compact.edge_count(), 2 + (2 * 2) + 3 + 16); assert_eq!(compact.voter_count(), 6); // this one has some duplicates. 
let compact = TestSolutionCompact { votes1: vec![(99, 1), (99, 1)], - votes2: vec![ - (99, (3, ACC.clone()), 7), - (99, (4, ACC.clone()), 8), - ], - votes3: vec![ - (99, [(11, ACC.clone()), (11, ACC.clone())], 13), - ], + votes2: vec![(99, (3, ACC.clone()), 7), (99, (4, ACC.clone()), 8)], + votes3: vec![(99, [(11, ACC.clone()), (11, ACC.clone())], 13)], ..Default::default() }; @@ -1484,7 +1212,6 @@ mod solution_type { let voter_at = |a: u32| -> Option { Some(a as AccountId) }; let target_at = |a: u8| -> Option { Some(a as AccountId) }; - assert_eq!( compact.into_assignment(&voter_at, &target_at).unwrap_err(), PhragmenError::CompactStakeOverflow, @@ -1494,7 +1221,11 @@ mod solution_type { let compact = TestSolutionCompact { votes1: Default::default(), votes2: Default::default(), - votes3: vec![(0, [(1, TestAccuracy::from_percent(70)), (2, TestAccuracy::from_percent(80))], 3)], + votes3: vec![( + 0, + [(1, TestAccuracy::from_percent(70)), (2, TestAccuracy::from_percent(80))], + 3, + )], ..Default::default() }; @@ -1509,21 +1240,15 @@ mod solution_type { let voter_index = |a: &AccountId| -> Option { Some(*a as u32) }; let target_index = |a: &AccountId| -> Option { Some(*a as u8) }; - let assignments = vec![ - Assignment { - who: 1 as AccountId, - distribution: - (10..27) - .map(|i| (i as AccountId, Percent::from_parts(i as u8))) - .collect::>(), - }, - ]; + let assignments = vec![Assignment { + who: 1 as AccountId, + distribution: (10..27) + .map(|i| (i as AccountId, Percent::from_parts(i as u8))) + .collect::>(), + }]; - let compacted = TestSolutionCompact::from_assignment( - &assignments, - voter_index, - target_index, - ); + let compacted = + TestSolutionCompact::from_assignment(&assignments, voter_index, target_index); assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow); } @@ -1535,12 +1260,12 @@ mod solution_type { let assignments = vec![ Assignment { who: 1 as AccountId, - distribution: vec![(10, Percent::from_percent(50)), (11, Percent::from_percent(50))], - }, - Assignment { - who: 2, - distribution: vec![], + distribution: vec![ + (10, Percent::from_percent(50)), + (11, Percent::from_percent(50)), + ], }, + Assignment { who: 2, distribution: vec![] }, ]; let voter_index = |a: &AccountId| -> Option { @@ -1550,11 +1275,8 @@ mod solution_type { targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() }; - let compacted = TestSolutionCompact::from_assignment( - &assignments, - voter_index, - target_index, - ).unwrap(); + let compacted = + TestSolutionCompact::from_assignment(&assignments, voter_index, target_index).unwrap(); assert_eq!( compacted, diff --git a/substrate/primitives/panic-handler/src/lib.rs b/substrate/primitives/panic-handler/src/lib.rs index 150ce5297680799ff8a346ef660a6bd5ea77de8e..1c72f224071cda5b3ca0c1446472589a9e5e7016 100644 --- a/substrate/primitives/panic-handler/src/lib.rs +++ b/substrate/primitives/panic-handler/src/lib.rs @@ -25,11 +25,13 @@ //! temporarily be disabled by using an [`AbortGuard`]. use backtrace::Backtrace; -use std::io::{self, Write}; -use std::marker::PhantomData; -use std::panic::{self, PanicInfo}; -use std::cell::Cell; -use std::thread; +use std::{ + cell::Cell, + io::{self, Write}, + marker::PhantomData, + panic::{self, PanicInfo}, + thread, +}; thread_local! 
{ static ON_PANIC: Cell = Cell::new(OnPanic::Abort); @@ -56,18 +58,19 @@ pub fn set(bug_url: &str, version: &str) { panic::set_hook(Box::new({ let version = version.to_string(); let bug_url = bug_url.to_string(); - move |c| { - panic_hook(c, &bug_url, &version) - } + move |c| panic_hook(c, &bug_url, &version) })); } macro_rules! ABOUT_PANIC { - () => (" + () => { + " This is a bug. Please report it at: {} -")} +" + }; +} /// Set aborting flag. Returns previous value of the flag. fn set_abort(on_panic: OnPanic) -> OnPanic { @@ -92,35 +95,26 @@ pub struct AbortGuard { /// Value that was in `ABORT` before we created this guard. previous_val: OnPanic, /// Marker so that `AbortGuard` doesn't implement `Send`. - _not_send: PhantomData> + _not_send: PhantomData>, } impl AbortGuard { /// Create a new guard. While the guard is alive, panics that happen in the current thread will /// unwind the stack (unless another guard is created afterwards). pub fn force_unwind() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Unwind), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::Unwind), _not_send: PhantomData } } /// Create a new guard. While the guard is alive, panics that happen in the current thread will /// abort the process (unless another guard is created afterwards). pub fn force_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Abort), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::Abort), _not_send: PhantomData } } /// Create a new guard. While the guard is alive, panics that happen in the current thread will /// **never** abort the process (even if `AbortGuard::force_abort()` guard will be created afterwards). pub fn never_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::NeverAbort), - _not_send: PhantomData - } + AbortGuard { previous_val: set_abort(OnPanic::NeverAbort), _not_send: PhantomData } } } @@ -141,7 +135,7 @@ fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { None => match info.payload().downcast_ref::() { Some(s) => &s[..], None => "Box", - } + }, }; let thread = thread::current(); @@ -158,11 +152,7 @@ fn panic_hook(info: &PanicInfo, report_url: &str, version: &str) { let _ = writeln!(stderr, ""); let _ = writeln!(stderr, "{:?}", backtrace); let _ = writeln!(stderr, ""); - let _ = writeln!( - stderr, - "Thread '{}' panicked at '{}', {}:{}", - name, msg, file, line - ); + let _ = writeln!(stderr, "Thread '{}' panicked at '{}', {}:{}", name, msg, file, line); let _ = writeln!(stderr, ABOUT_PANIC!(), report_url); ON_PANIC.with(|val| { diff --git a/substrate/primitives/rpc/src/lib.rs b/substrate/primitives/rpc/src/lib.rs index ea7118479943d8934beba94f5d5400ce42046d8c..0d716d5a07c18a87cae504349eb66fd24c4834be 100644 --- a/substrate/primitives/rpc/src/lib.rs +++ b/substrate/primitives/rpc/src/lib.rs @@ -19,22 +19,16 @@ #![warn(missing_docs)] -pub mod number; pub mod list; +pub mod number; pub mod tracing; /// A util function to assert the result of serialization and deserialization is the same. 
#[cfg(test)] -pub(crate) fn assert_deser(s: &str, expected: T) where - T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq +pub(crate) fn assert_deser(s: &str, expected: T) +where + T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq, { - assert_eq!( - serde_json::from_str::(s).unwrap(), - expected - ); - assert_eq!( - serde_json::to_string(&expected).unwrap(), - s - ); + assert_eq!(serde_json::from_str::(s).unwrap(), expected); + assert_eq!(serde_json::to_string(&expected).unwrap(), s); } - diff --git a/substrate/primitives/rpc/src/list.rs b/substrate/primitives/rpc/src/list.rs index 1f4c6ff098c4d7643f5a8213b87cfc90d6886df2..b3d0a4f546e94e7712d670657d582dd14350d3bb 100644 --- a/substrate/primitives/rpc/src/list.rs +++ b/substrate/primitives/rpc/src/list.rs @@ -17,7 +17,7 @@ //! RPC a lenient list or value type. -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// RPC list or value wrapper. /// diff --git a/substrate/primitives/rpc/src/number.rs b/substrate/primitives/rpc/src/number.rs index ad19b7f5b43671d41a30166620b42f09fa71e44d..916f2c3d8326648b6c190c616737620909914db8 100644 --- a/substrate/primitives/rpc/src/number.rs +++ b/substrate/primitives/rpc/src/number.rs @@ -18,9 +18,12 @@ //! A number type that can be serialized both as a number or a string that encodes a number in a //! string. -use std::{convert::{TryFrom, TryInto}, fmt::Debug}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_core::U256; +use std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, +}; /// A number type that can be serialized both as a number or a string that encodes a number in a /// string. diff --git a/substrate/primitives/rpc/src/tracing.rs b/substrate/primitives/rpc/src/tracing.rs index 1062ec1d9ebe5c4043076d10a02b376913eb8238..7e05cd84a7dd7ce8ea60a07e901e6495a3f0779d 100644 --- a/substrate/primitives/rpc/src/tracing.rs +++ b/substrate/primitives/rpc/src/tracing.rs @@ -17,7 +17,7 @@ //! Types for working with tracing data -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use rustc_hash::FxHashMap; @@ -84,7 +84,7 @@ pub struct Data { #[serde(rename_all = "camelCase")] pub struct TraceError { /// Error message - pub error: String, + pub error: String, } /// Response for the `state_traceBlock` RPC. @@ -94,5 +94,5 @@ pub enum TraceBlockResponse { /// Error block tracing response TraceError(TraceError), /// Successful block tracing response - BlockTrace(BlockTrace) + BlockTrace(BlockTrace), } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/lib.rs b/substrate/primitives/runtime-interface/proc-macro/src/lib.rs index 53df4e084d277a558bfd4b1533b552c0fd3c4d05..502130f1b41080d357f9c456de022ec2fc6eb3a5 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/lib.rs @@ -26,8 +26,10 @@ //! 3. The [`PassByEnum`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Enum`. //! 4. The [`PassByInner`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Inner`. 
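As background for the derive macros whose imports are reorganized below: `PassByCodec` makes a type crossable over the runtime interface boundary by SCALE-encoding it on one side and decoding it on the other. A rough, self-contained illustration of that round trip, using `parity-scale-codec` directly; `Transfer` is an invented example type, not part of this diff, and this is the assumed semantics rather than the macro's actual expansion.

```rust
use parity_scale_codec::{Decode, Encode};

// An invented example type; deriving Encode/Decode is what `PassByCodec`
// builds on under the hood.
#[derive(Encode, Decode, PartialEq, Debug)]
struct Transfer {
    to: [u8; 32],
    amount: u128,
}

fn main() {
    let original = Transfer { to: [1; 32], amount: 100 };
    // One side of the interface: encode into the byte buffer that would be
    // handed across the wasm boundary.
    let wire: Vec<u8> = original.encode();
    // Other side: decode the buffer back into the typed value.
    let roundtripped = Transfer::decode(&mut &wire[..]).expect("valid encoding");
    assert_eq!(original, roundtripped);
}
```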
-use syn::{parse_macro_input, ItemTrait, DeriveInput, Result, Token}; -use syn::parse::{Parse, ParseStream}; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, DeriveInput, ItemTrait, Result, Token, +}; mod pass_by; mod runtime_interface; @@ -35,7 +37,7 @@ mod utils; struct Options { wasm_only: bool, - tracing: bool + tracing: bool, } impl Options { @@ -86,17 +88,21 @@ pub fn runtime_interface( #[proc_macro_derive(PassByCodec)] pub fn pass_by_codec(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); - pass_by::codec_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + pass_by::codec_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByInner)] pub fn pass_by_inner(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); - pass_by::inner_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + pass_by::inner_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByEnum)] pub fn pass_by_enum(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as DeriveInput); pass_by::enum_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() -} \ No newline at end of file +} diff --git a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs index 1e6b72f8823398ad365fde9b646489351524aa26..2be455d17a47b1263bf9322a8933c16fb025668f 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs @@ -22,7 +22,7 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote}; +use syn::{parse_quote, DeriveInput, Generics, Result}; use quote::quote; @@ -53,7 +53,7 @@ pub fn derive_impl(mut input: DeriveInput) -> Result { fn add_trait_bounds(generics: &mut Generics) { let crate_ = generate_crate_access(); - generics.type_params_mut() + generics + .type_params_mut() .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::codec::Codec))); } - diff --git a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index cc0428fc9b56bc2d34b577836eccac869950e75c..f614e4d9f294dd2d28a0371fe248fffc8b1a5f1d 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -21,11 +21,11 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Data, Fields, Error, Ident}; +use syn::{Data, DeriveInput, Error, Fields, Ident, Result}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Enum`. pub fn derive_impl(input: DeriveInput) -> Result { @@ -81,22 +81,21 @@ pub fn derive_impl(input: DeriveInput) -> Result { /// enum or a variant is not an unit. 
fn get_enum_field_idents<'a>(data: &'a Data) -> Result>> { match data { - Data::Enum(d) => { + Data::Enum(d) => if d.variants.len() <= 256 { - Ok( - d.variants.iter().map(|v| if let Fields::Unit = v.fields { + Ok(d.variants.iter().map(|v| { + if let Fields::Unit = v.fields { Ok(&v.ident) } else { Err(Error::new( Span::call_site(), "`PassByEnum` only supports unit variants.", )) - }) - ) + } + })) } else { Err(Error::new(Span::call_site(), "`PassByEnum` only supports `256` variants.")) - } - }, - _ => Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")) + }, + _ => Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")), } } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index 7fe0d1734c36cacd330aeb62b75f85657430cdf3..6eaa689d6293a0cd064046013dde0feadf0eb0a1 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -22,11 +22,11 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote, Type, Data, Error, Fields, Ident}; +use syn::{parse_quote, Data, DeriveInput, Error, Fields, Generics, Ident, Result, Type}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Inner` and `PassByInner`. pub fn derive_impl(mut input: DeriveInput) -> Result { @@ -80,7 +80,8 @@ pub fn derive_impl(mut input: DeriveInput) -> Result { fn add_trait_bounds(generics: &mut Generics) { let crate_ = generate_crate_access(); - generics.type_params_mut() + generics + .type_params_mut() .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::RIType))); } @@ -97,15 +98,13 @@ fn extract_inner_ty_and_name(data: &Data) -> Result<(Type, Option)> { Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { let field = &unnamed.unnamed[0]; return Ok((field.ty.clone(), field.ident.clone())) - } + }, _ => {}, } } - Err( - Error::new( - Span::call_site(), - "Only newtype/one field structs are supported by `PassByInner`!", - ) - ) + Err(Error::new( + Span::call_site(), + "Only newtype/one field structs are supported by `PassByInner`!", + )) } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index d17067d990c360f62675de9991214bad08cb4bc2..1943acbb214da08a9a5ada8348a794a58c6e4734 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -30,15 +30,16 @@ //! are feature-gated, so that one is compiled for the native and the other for the wasm side. 
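The three derive entry points in `lib.rs` above share one idiom: parse the input into a `DeriveInput`, run a fallible `*_derive_impl`, and turn any `syn::Error`, such as the unit-variant checks in `enum_.rs`, into a compile error instead of panicking. A minimal standalone version of that proc-macro pattern, assuming a hypothetical derive living in its own proc-macro crate:

use proc_macro::TokenStream;
use quote::quote;
use syn::{parse_macro_input, DeriveInput};

#[proc_macro_derive(MyDerive)]
pub fn my_derive(input: TokenStream) -> TokenStream {
	let input = parse_macro_input!(input as DeriveInput);
	// Errors surface as `compile_error!` at the use site, with spans intact.
	derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into()
}

fn derive_impl(input: DeriveInput) -> syn::Result<proc_macro2::TokenStream> {
	let name = &input.ident;
	Ok(quote! {
		impl #name {
			pub fn type_name() -> &'static str { stringify!(#name) }
		}
	})
}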
use crate::utils::{ - generate_crate_access, create_exchangeable_host_function_ident, get_function_arguments, - get_function_argument_names, get_runtime_interface, create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + generate_crate_access, get_function_argument_names, get_function_arguments, + get_runtime_interface, }; use syn::{ - Ident, ItemTrait, TraitItemMethod, FnArg, Signature, Result, spanned::Spanned, parse_quote, + parse_quote, spanned::Spanned, FnArg, Ident, ItemTrait, Result, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, quote_spanned}; @@ -51,21 +52,22 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Res let runtime_interface = get_runtime_interface(trait_def)?; // latest version dispatch - let token_stream: Result = runtime_interface.latest_versions() - .try_fold( - TokenStream::new(), - |mut t, (latest_version, method)| { - t.extend(function_for_method(method, latest_version, is_wasm_only)?); - Ok(t) - } - ); + let token_stream: Result = runtime_interface.latest_versions().try_fold( + TokenStream::new(), + |mut t, (latest_version, method)| { + t.extend(function_for_method(method, latest_version, is_wasm_only)?); + Ok(t) + }, + ); // earlier versions compatibility dispatch (only std variant) - let result: Result = runtime_interface.all_versions().try_fold(token_stream?, |mut t, (version, method)| - { - t.extend(function_std_impl(trait_name, method, version, is_wasm_only, tracing)?); - Ok(t) - }); + let result: Result = + runtime_interface + .all_versions() + .try_fold(token_stream?, |mut t, (version, method)| { + t.extend(function_std_impl(trait_name, method, version, is_wasm_only, tracing)?); + Ok(t) + }); result } @@ -76,21 +78,16 @@ fn function_for_method( latest_version: u32, is_wasm_only: bool, ) -> Result { - let std_impl = if !is_wasm_only { - function_std_latest_impl(method, latest_version)? - } else { - quote!() - }; + let std_impl = + if !is_wasm_only { function_std_latest_impl(method, latest_version)? } else { quote!() }; let no_std_impl = function_no_std_impl(method)?; - Ok( - quote! { - #std_impl + Ok(quote! { + #std_impl - #no_std_impl - } - ) + #no_std_impl + }) } /// Generates the bare function implementation for `cfg(not(feature = "std"))`. @@ -102,31 +99,27 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result { let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - Ok( - quote! { - #[cfg(not(feature = "std"))] - #( #attrs )* - pub fn #function_name( #( #args, )* ) #return_value { - // Call the host function - #host_function_name.get()( #( #arg_names, )* ) - } + Ok(quote! { + #[cfg(not(feature = "std"))] + #( #attrs )* + pub fn #function_name( #( #args, )* ) #return_value { + // Call the host function + #host_function_name.get()( #( #arg_names, )* ) } - ) + }) } /// Generate call to latest function version for `cfg((feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. 
-fn function_std_latest_impl( - method: &TraitItemMethod, - latest_version: u32, -) -> Result { +fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result { let function_name = &method.sig.ident; let args = get_function_arguments(&method.sig).map(FnArg::Typed); let arg_names = get_function_argument_names(&method.sig).collect::>(); let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - let latest_function_name = create_function_ident_with_version(&method.sig.ident, latest_version); + let latest_function_name = + create_function_ident_with_version(&method.sig.ident, latest_version); Ok(quote_spanned! { method.span() => #[cfg(feature = "std")] @@ -153,17 +146,16 @@ fn function_std_impl( let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig).map(FnArg::Typed).chain( // Add the function context as last parameter when this is a wasm only interface. - iter::from_fn(|| + iter::from_fn(|| { if is_wasm_only { - Some( - parse_quote!( - mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext - ) - ) + Some(parse_quote!( + mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext + )) } else { None } - ).take(1), + }) + .take(1), ); let return_value = &method.sig.output; let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); @@ -179,15 +171,13 @@ fn function_std_impl( ) }; - Ok( - quote_spanned! { method.span() => - #[cfg(feature = "std")] - #( #attrs )* - fn #function_name( #( #args, )* ) #return_value { - #call_to_trait - } + Ok(quote_spanned! { method.span() => + #[cfg(feature = "std")] + #( #attrs )* + fn #function_name( #( #args, )* ) #return_value { + #call_to_trait } - ) + }) } /// Generate the call to the interface trait. @@ -199,10 +189,8 @@ fn generate_call_to_trait( ) -> TokenStream { let crate_ = generate_crate_access(); let method_name = create_function_ident_with_version(&method.sig.ident, version); - let expect_msg = format!( - "`{}` called outside of an Externalities-provided environment.", - method_name, - ); + let expect_msg = + format!("`{}` called outside of an Externalities-provided environment.", method_name,); let arg_names = get_function_argument_names(&method.sig); if takes_self_argument(&method.sig) { diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index fb127b19415327dd78ce8691ac8ac8e8dd6fda65..ab84c04e3a72812fdddc1a3de57b2b946ead69e4 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -22,35 +22,36 @@ //! executor. These implementations call the bare function interface. 
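`function_std_latest_impl` above generates the thin forwarder `fn func(..) { func_version_<latest_version>(..) }`. Hand-expanded for a hypothetical two-version method, the std side comes out roughly as:

// Versioned implementations generated from the trait methods.
#[cfg(feature = "std")]
fn random_seed_version_1() -> u64 {
	4 // original behaviour, still callable by already-compiled wasm blobs
}

#[cfg(feature = "std")]
fn random_seed_version_2() -> u64 {
	8 // current behaviour
}

// The public bare function always forwards to the latest version.
#[cfg(feature = "std")]
pub fn random_seed() -> u64 {
	random_seed_version_2()
}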
use crate::utils::{ - generate_crate_access, create_host_function_ident, get_function_argument_names, - get_function_argument_types_without_ref, get_function_argument_types_ref_and_mut, - get_function_argument_names_and_types_without_ref, get_function_arguments, - get_function_argument_types, create_exchangeable_host_function_ident, get_runtime_interface, - create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + create_host_function_ident, generate_crate_access, get_function_argument_names, + get_function_argument_names_and_types_without_ref, get_function_argument_types, + get_function_argument_types_ref_and_mut, get_function_argument_types_without_ref, + get_function_arguments, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, ReturnType, Ident, Pat, Error, Signature, spanned::Spanned, + spanned::Spanned, Error, Ident, ItemTrait, Pat, Result, ReturnType, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; use inflector::Inflector; -use std::iter::{Iterator, self}; +use std::iter::{self, Iterator}; /// Generate the extern host functions for wasm and the `HostFunctions` struct that provides the /// implementations for the host functions on the host. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let trait_name = &trait_def.ident; - let extern_host_function_impls = get_runtime_interface(trait_def)? - .latest_versions() - .try_fold(TokenStream::new(), |mut t, (version, method)| { + let extern_host_function_impls = get_runtime_interface(trait_def)?.latest_versions().try_fold( + TokenStream::new(), + |mut t, (version, method)| { t.extend(generate_extern_host_function(method, version, trait_name)?); Ok::<_, Error>(t) - })?; + }, + )?; let exchangeable_host_functions = get_runtime_interface(trait_def)? .latest_versions() .try_fold(TokenStream::new(), |mut t, (_, m)| { @@ -59,27 +60,29 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result ret` to make the function implementations exchangeable. - #[cfg(not(feature = "std"))] - mod extern_host_function_impls { - use super::*; - - #extern_host_function_impls - } - - #exchangeable_host_functions + Ok(quote! { + /// The implementations of the extern host functions. This special implementation module + /// is required to change the extern host functions signature to + /// `unsafe fn name(args) -> ret` to make the function implementations exchangeable. + #[cfg(not(feature = "std"))] + mod extern_host_function_impls { + use super::*; - #host_functions_struct + #extern_host_function_impls } - ) + + #exchangeable_host_functions + + #host_functions_struct + }) } /// Generate the extern host function for the given method. -fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_name: &Ident) -> Result { +fn generate_extern_host_function( + method: &TraitItemMethod, + version: u32, + trait_name: &Ident, +) -> Result { let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig); let arg_types = get_function_argument_types_without_ref(&method.sig); @@ -106,33 +109,31 @@ fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_n ReturnType::Default => quote!(), ReturnType::Type(_, ref ty) => quote! { <#ty as #crate_::wasm::FromFFIValue>::from_ffi_value(result) - } + }, }; - Ok( - quote! 
{ - #[doc = #doc_string] - pub fn #function ( #( #args ),* ) #return_value { - extern "C" { - /// The extern function. - pub fn #ext_function ( - #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* - ) #ffi_return_value; - } + Ok(quote! { + #[doc = #doc_string] + pub fn #function ( #( #args ),* ) #return_value { + extern "C" { + /// The extern function. + pub fn #ext_function ( + #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* + ) #ffi_return_value; + } - // Generate all wrapped ffi values. - #( - let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( - &#arg_names2, - ); - )* + // Generate all wrapped ffi values. + #( + let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( + &#arg_names2, + ); + )* - let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; + let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; - #convert_return_value - } + #convert_return_value } - ) + }) } /// Generate the host exchangeable function for the given method. @@ -144,44 +145,43 @@ fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); - } - ) + Ok(quote! { + #[cfg(not(feature = "std"))] + #[allow(non_upper_case_globals)] + #[doc = #doc_string] + pub static #exchangeable_function : #crate_::wasm::ExchangeableFunction< + fn ( #( #arg_types ),* ) #output + > = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); + }) } /// Generate the `HostFunctions` struct that implements `wasm-interface::HostFunctions` to provide /// implementations for the extern host functions. -fn generate_host_functions_struct(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { +fn generate_host_functions_struct( + trait_def: &ItemTrait, + is_wasm_only: bool, +) -> Result { let crate_ = generate_crate_access(); let host_functions = get_runtime_interface(trait_def)? .all_versions() - .map(|(version, method)| + .map(|(version, method)| { generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) - ) + }) .collect::>>()?; - Ok( - quote! { - /// Provides implementations for the extern host functions. - #[cfg(feature = "std")] - pub struct HostFunctions; - - #[cfg(feature = "std")] - impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { - fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { - vec![ #( #host_functions ),* ] - } + Ok(quote! { + /// Provides implementations for the extern host functions. 
+ #[cfg(feature = "std")] + pub struct HostFunctions; + + #[cfg(feature = "std")] + impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { + fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { + vec![ #( #host_functions ),* ] } } - ) + }) } /// Generates the host function struct that implements `wasm_interface::Function` and returns a static @@ -199,71 +199,65 @@ fn generate_host_function_implementation( let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); let crate_ = generate_crate_access(); let signature = generate_wasm_interface_signature_for_host_function(&method.sig)?; - let wasm_to_ffi_values = generate_wasm_to_ffi_values( - &method.sig, - trait_name, - ).collect::>>()?; + let wasm_to_ffi_values = + generate_wasm_to_ffi_values(&method.sig, trait_name).collect::>>()?; let ffi_to_host_values = generate_ffi_to_host_value(&method.sig).collect::>>()?; let host_function_call = generate_host_function_call(&method.sig, version, is_wasm_only); let into_preallocated_ffi_value = generate_into_preallocated_ffi_value(&method.sig)?; let convert_return_value = generate_return_value_into_wasm_value(&method.sig); - Ok( - quote! { - { - struct #struct_name; - - impl #crate_::sp_wasm_interface::Function for #struct_name { - fn name(&self) -> &str { - #name - } - - fn signature(&self) -> #crate_::sp_wasm_interface::Signature { - #signature - } - - fn execute( - &self, - __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, - args: &mut dyn Iterator, - ) -> std::result::Result, String> { - #( #wasm_to_ffi_values )* - #( #ffi_to_host_values )* - #host_function_call - #into_preallocated_ffi_value - #convert_return_value - } + Ok(quote! { + { + struct #struct_name; + + impl #crate_::sp_wasm_interface::Function for #struct_name { + fn name(&self) -> &str { + #name + } + + fn signature(&self) -> #crate_::sp_wasm_interface::Signature { + #signature } - &#struct_name as &dyn #crate_::sp_wasm_interface::Function + fn execute( + &self, + __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, + args: &mut dyn Iterator, + ) -> std::result::Result, String> { + #( #wasm_to_ffi_values )* + #( #ffi_to_host_values )* + #host_function_call + #into_preallocated_ffi_value + #convert_return_value + } } + + &#struct_name as &dyn #crate_::sp_wasm_interface::Function } - ) + }) } /// Generate the `wasm_interface::Signature` for the given host function `sig`. fn generate_wasm_interface_signature_for_host_function(sig: &Signature) -> Result { let crate_ = generate_crate_access(); let return_value = match &sig.output { - ReturnType::Type(_, ty) => - quote! { - Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) - }, - ReturnType::Default => quote!( None ), + ReturnType::Type(_, ty) => quote! { + Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) + }, + ReturnType::Default => quote!(None), }; - let arg_types = get_function_argument_types_without_ref(sig) - .map(|ty| quote! { + let arg_types = get_function_argument_types_without_ref(sig).map(|ty| { + quote! { <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE - }); + } + }); - Ok( - quote! { - #crate_::sp_wasm_interface::Signature { - args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), - return_value: #return_value, - } + Ok(quote! 
{ + #crate_::sp_wasm_interface::Signature { + args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), + return_value: #return_value, } - ) + }) } /// Generate the code that converts the wasm values given to `HostFunctions::execute` into the FFI @@ -279,24 +273,23 @@ fn generate_wasm_to_ffi_values<'a>( function_name, ); - get_function_argument_names_and_types_without_ref(sig) - .map(move |(name, ty)| { - let try_from_error = format!( - "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", - name.to_token_stream(), - function_name, - trait_name, - ); + get_function_argument_names_and_types_without_ref(sig).map(move |(name, ty)| { + let try_from_error = format!( + "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", + name.to_token_stream(), + function_name, + trait_name, + ); - let var_name = generate_ffi_value_var_name(&name)?; + let var_name = generate_ffi_value_var_name(&name)?; - Ok(quote! { - let val = args.next().ok_or_else(|| #error_message)?; - let #var_name = < - <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue - >::try_from_value(val).ok_or_else(|| #try_from_error)?; - }) + Ok(quote! { + let val = args.next().ok_or_else(|| #error_message)?; + let #var_name = < + <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue + >::try_from_value(val).ok_or_else(|| #try_from_error)?; }) + }) } /// Generate the code to convert the ffi values on the host to the host values using `FromFFIValue`. @@ -311,14 +304,12 @@ fn generate_ffi_to_host_value<'a>( .map(move |((name, ty), mut_access)| { let ffi_value_var_name = generate_ffi_value_var_name(&name)?; - Ok( - quote! { - let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( - __function_context__, - #ffi_value_var_name, - )?; - } - ) + Ok(quote! { + let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( + __function_context__, + #ffi_value_var_name, + )?; + }) }) } @@ -326,19 +317,17 @@ fn generate_ffi_to_host_value<'a>( fn generate_host_function_call(sig: &Signature, version: u32, is_wasm_only: bool) -> TokenStream { let host_function_name = create_function_ident_with_version(&sig.ident, version); let result_var_name = generate_host_function_result_var_name(&sig.ident); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.map(|(vr, vm)| quote!(#vr #vm)) - ); + let ref_and_mut = + get_function_argument_types_ref_and_mut(sig).map(|ram| ram.map(|(vr, vm)| quote!(#vr #vm))); let names = get_function_argument_names(sig); - let var_access = names.zip(ref_and_mut) - .map(|(n, ref_and_mut)| { - quote!( #ref_and_mut #n ) - }) + let var_access = names + .zip(ref_and_mut) + .map(|(n, ref_and_mut)| quote!( #ref_and_mut #n )) // If this is a wasm only interface, we add the function context as last parameter. .chain( iter::from_fn(|| if is_wasm_only { Some(quote!(__function_context__)) } else { None }) - .take(1) + .take(1), ); quote! { @@ -354,16 +343,15 @@ fn generate_host_function_result_var_name(name: &Ident) -> Ident { /// Generate the variable name that stores the FFI value. 
fn generate_ffi_value_var_name(pat: &Pat) -> Result { match pat { - Pat::Ident(pat_ident) => { + Pat::Ident(pat_ident) => if let Some(by_ref) = pat_ident.by_ref { Err(Error::new(by_ref.span(), "`ref` not supported!")) } else if let Some(sub_pattern) = &pat_ident.subpat { Err(Error::new(sub_pattern.0.span(), "Not supported!")) } else { Ok(Ident::new(&format!("{}_ffi_value", pat_ident.ident), Span::call_site())) - } - } - _ => Err(Error::new(pat.span(), "Not supported as variable name!")) + }, + _ => Err(Error::new(pat.span(), "Not supported as variable name!")), } } @@ -373,25 +361,23 @@ fn generate_ffi_value_var_name(pat: &Pat) -> Result { /// that the type implements `IntoPreAllocatedFFIValue`. fn generate_into_preallocated_ffi_value(sig: &Signature) -> Result { let crate_ = generate_crate_access(); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.and_then(|(vr, vm)| vm.map(|v| (vr, v))) - ); + let ref_and_mut = get_function_argument_types_ref_and_mut(sig) + .map(|ram| ram.and_then(|(vr, vm)| vm.map(|v| (vr, v)))); let names_and_types = get_function_argument_names_and_types_without_ref(sig); - ref_and_mut.zip(names_and_types) + ref_and_mut + .zip(names_and_types) .filter_map(|(ram, (name, ty))| ram.map(|_| (name, ty))) .map(|(name, ty)| { let ffi_var_name = generate_ffi_value_var_name(&name)?; - Ok( - quote! { - <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( - #name, - __function_context__, - #ffi_var_name, - )?; - } - ) + Ok(quote! { + <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( + #name, + __function_context__, + #ffi_var_name, + )?; + }) }) .collect() } @@ -401,7 +387,7 @@ fn generate_return_value_into_wasm_value(sig: &Signature) -> TokenStream { let crate_ = generate_crate_access(); match &sig.output { - ReturnType::Default => quote!( Ok(None) ), + ReturnType::Default => quote!(Ok(None)), ReturnType::Type(_, ty) => { let result_var_name = generate_host_function_result_var_name(&sig.ident); @@ -411,6 +397,6 @@ fn generate_return_value_into_wasm_value(sig: &Signature) -> TokenStream { __function_context__, ).map(#crate_::sp_wasm_interface::IntoValue::into_value).map(Some) } - } + }, } } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 0e392b1a02fbf2452767a5dea5462215c79f9f4c..c62e3ba87ccd35dcdebf287add3557d8cd9e8085 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -19,15 +19,14 @@ //! default implementations and implements the trait for `&mut dyn Externalities`. 
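Each method handled by `host_function_interface.rs` above ends up as a unit struct implementing `sp_wasm_interface::Function` with exactly the `name`/`signature`/`execute` trio in the quoted block. For orientation, a hand-written impl of that trait for an argument-less host function; this is a sketch, not generated output:

use sp_wasm_interface::{Function, FunctionContext, Signature, Value};

struct SayHello;

impl Function for SayHello {
	fn name(&self) -> &str {
		// Follows the `ext_<trait>_<fn>_version_<n>` naming from utils.rs.
		"ext_example_say_hello_version_1"
	}

	fn signature(&self) -> Signature {
		// No arguments and no return value.
		Signature { args: std::borrow::Cow::Borrowed(&[][..]), return_value: None }
	}

	fn execute(
		&self,
		_context: &mut dyn FunctionContext,
		_args: &mut dyn Iterator<Item = Value>,
	) -> Result<Option<Value>, String> {
		println!("hello from the host");
		Ok(None)
	}
}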
use crate::utils::{ - generate_crate_access, - get_function_argument_types_without_ref, - get_runtime_interface, - create_function_ident_with_version, + create_function_ident_with_version, generate_crate_access, + get_function_argument_types_without_ref, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, Error, fold::{self, Fold}, spanned::Spanned, - Visibility, Receiver, Type, Generics, + fold::{self, Fold}, + spanned::Spanned, + Error, Generics, ItemTrait, Receiver, Result, TraitItemMethod, Type, Visibility, }; use proc_macro2::TokenStream; @@ -40,13 +39,11 @@ pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result let impl_trait = impl_trait_for_externalities(trait_def, is_wasm_only)?; let essential_trait_def = declare_essential_trait(trait_def)?; - Ok( - quote! { - #impl_trait + Ok(quote! { + #impl_trait - #essential_trait_def - } - ) + #essential_trait_def + }) } /// Converts the given trait definition into the essential trait definition without method @@ -66,12 +63,10 @@ impl ToEssentialTraitDef { let mut errors = self.errors; let methods = self.methods; if let Some(first_error) = errors.pop() { - Err( - errors.into_iter().fold(first_error, |mut o, n| { - o.combine(n); - o - }) - ) + Err(errors.into_iter().fold(first_error, |mut o, n| { + o.combine(n); + o + })) } else { Ok(methods) } @@ -101,12 +96,12 @@ impl Fold for ToEssentialTraitDef { } let arg_types = get_function_argument_types_without_ref(&method.sig); - arg_types.filter_map(|ty| - match *ty { + arg_types + .filter_map(|ty| match *ty { Type::ImplTrait(impl_trait) => Some(impl_trait), - _ => None - } - ).for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); + _ => None, + }) + .for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); self.error_on_generic_parameters(&method.sig.generics); @@ -145,13 +140,11 @@ fn declare_essential_trait(trait_def: &ItemTrait) -> Result { } let methods = folder.into_methods()?; - Ok( - quote! { - trait #trait_ { - #( #methods )* - } + Ok(quote! { + trait #trait_ { + #( #methods )* } - ) + }) } /// Implements the given trait definition for `dyn Externalities`. @@ -172,12 +165,10 @@ fn impl_trait_for_externalities(trait_def: &ItemTrait, is_wasm_only: bool) -> Re quote!( &mut dyn #crate_::Externalities ) }; - Ok( - quote! { - #[cfg(feature = "std")] - impl #trait_ for #impl_type { - #( #methods )* - } + Ok(quote! { + #[cfg(feature = "std")] + impl #trait_ for #impl_type { + #( #methods )* } - ) + }) } diff --git a/substrate/primitives/runtime-interface/proc-macro/src/utils.rs b/substrate/primitives/runtime-interface/proc-macro/src/utils.rs index d2d9dd7e3997a428c15b6711171af75344e96a85..02b5d23fbcac7371f21db76ee1689ec847f46f05 100644 --- a/substrate/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/substrate/primitives/runtime-interface/proc-macro/src/utils.rs @@ -17,16 +17,19 @@ //! Util function used by this crate. 
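`trait_decl_impl.rs` above rewrites the annotated trait into an "essential" trait and implements it for `&mut dyn Externalities`, so method bodies run against the ambient externalities. From the user side, one attribute drives all of it; a sketch of a declaration plus the test environment it needs, assuming `sp-io` for `TestExternalities`:

use sp_runtime_interface::runtime_interface;

#[runtime_interface]
trait Misc {
	// `&mut self` is the `&mut dyn Externalities` the generated impl
	// targets; callers simply invoke the bare function `misc::log_u64`.
	fn log_u64(&mut self, val: u64) {
		println!("{}", val);
	}
}

fn main() {
	// Without this environment the call would panic with the generated
	// "called outside of an Externalities-provided environment" message.
	sp_io::TestExternalities::default().execute_with(|| {
		misc::log_u64(42);
	});
}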
-use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Ident, Error, Signature, Pat, PatType, FnArg, Type, token, TraitItemMethod, ItemTrait, - TraitItem, parse_quote, spanned::Spanned, Result, Meta, NestedMeta, Lit, Attribute, + parse_quote, spanned::Spanned, token, Attribute, Error, FnArg, Ident, ItemTrait, Lit, Meta, + NestedMeta, Pat, PatType, Result, Signature, TraitItem, TraitItemMethod, Type, }; use proc_macro_crate::{crate_name, FoundCrate}; -use std::{env, collections::{BTreeMap, btree_map::Entry}}; +use std::{ + collections::{btree_map::Entry, BTreeMap}, + env, +}; use quote::quote; @@ -53,8 +56,9 @@ impl<'a> RuntimeInterfaceFunction<'a> { pub fn latest_version(&self) -> (u32, &TraitItemMethod) { ( self.latest_version, - self.versions.get(&self.latest_version) - .expect("If latest_version has a value, the key with this value is in the versions; qed") + self.versions.get(&self.latest_version).expect( + "If latest_version has a value, the key with this value is in the versions; qed", + ), ) } } @@ -70,9 +74,12 @@ impl<'a> RuntimeInterface<'a> { } pub fn all_versions(&self) -> impl Iterator { - self.items.iter().flat_map(|(_, item)| item.versions.iter()).map(|(v, i)| (*v, *i)) + self.items + .iter() + .flat_map(|(_, item)| item.versions.iter()) + .map(|(v, i)| (*v, *i)) } - } +} /// Generates the include for the runtime-interface crate. pub fn generate_runtime_interface_include() -> TokenStream { @@ -88,16 +95,16 @@ pub fn generate_runtime_interface_include() -> TokenStream { Err(e) => { let err = Error::new(Span::call_site(), e).to_compile_error(); quote!( #err ) - } + }, } } /// Generates the access to the `sp-runtime-interface` crate. pub fn generate_crate_access() -> TokenStream { if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { - quote!( sp_runtime_interface ) + quote!(sp_runtime_interface) } else { - quote!( proc_macro_runtime_interface ) + quote!(proc_macro_runtime_interface) } } @@ -109,26 +116,14 @@ pub fn create_exchangeable_host_function_ident(name: &Ident) -> Ident { /// Create the host function identifier for the given function name. pub fn create_host_function_ident(name: &Ident, version: u32, trait_name: &Ident) -> Ident { Ident::new( - &format!( - "ext_{}_{}_version_{}", - trait_name.to_string().to_snake_case(), - name, - version, - ), + &format!("ext_{}_{}_version_{}", trait_name.to_string().to_snake_case(), name, version,), Span::call_site(), ) } /// Create the host function identifier for the given function name. pub fn create_function_ident_with_version(name: &Ident, version: u32) -> Ident { - Ident::new( - &format!( - "{}_version_{}", - name, - version, - ), - Span::call_site(), - ) + Ident::new(&format!("{}_version_{}", name, version,), Span::call_site()) } /// Returns the function arguments of the given `Signature`, minus any `self` arguments. @@ -143,10 +138,8 @@ pub fn get_function_arguments<'a>(sig: &'a Signature) -> impl Iterator(sig: &'a Signature) -> impl Iterator( sig: &'a Signature, ) -> impl Iterator> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => type_ref.elem, - _ => ty, - }) + get_function_arguments(sig).map(|pt| pt.ty).map(|ty| match *ty { + Type::Reference(type_ref) => type_ref.elem, + _ => ty, + }) } /// Returns the function argument names and types, minus any `self`. 
If any of the arguments @@ -183,11 +174,10 @@ pub fn get_function_argument_types_without_ref<'a>( pub fn get_function_argument_names_and_types_without_ref<'a>( sig: &'a Signature, ) -> impl Iterator, Box)> + 'a { - get_function_arguments(sig) - .map(|pt| match *pt.ty { - Type::Reference(type_ref) => (pt.pat, type_ref.elem), - _ => (pt.pat, pt.ty), - }) + get_function_arguments(sig).map(|pt| match *pt.ty { + Type::Reference(type_ref) => (pt.pat, type_ref.elem), + _ => (pt.pat, pt.ty), + }) } /// Returns the `&`/`&mut` for all function argument types, minus the `self` arg. If a function @@ -195,23 +185,18 @@ pub fn get_function_argument_names_and_types_without_ref<'a>( pub fn get_function_argument_types_ref_and_mut<'a>( sig: &'a Signature, ) -> impl Iterator)>> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), - _ => None, - }) + get_function_arguments(sig).map(|pt| pt.ty).map(|ty| match *ty { + Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), + _ => None, + }) } /// Returns an iterator over all trait methods for the given trait definition. fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator { - trait_def - .items - .iter() - .filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), - _ => None, - }) + trait_def.items.iter().filter_map(|i| match i { + TraitItem::Method(ref method) => Some(method), + _ => None, + }) } /// Parse version attribute. @@ -221,36 +206,34 @@ fn parse_version_attribute(version: &Attribute) -> Result { let meta = version.parse_meta()?; let err = Err(Error::new( - meta.span(), - "Unexpected `version` attribute. The supported format is `#[version(1)]`", - ) - ); + meta.span(), + "Unexpected `version` attribute. The supported format is `#[version(1)]`", + )); match meta { - Meta::List(list) => { + Meta::List(list) => if list.nested.len() != 1 { err } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { i.base10_parse() } else { err - } - }, + }, _ => err, } } /// Return item version (`#[version(X)]`) attribute, if present. fn get_item_version(item: &TraitItemMethod) -> Result> { - item.attrs.iter().find(|attr| attr.path.is_ident("version")) + item.attrs + .iter() + .find(|attr| attr.path.is_ident("version")) .map(|attr| parse_version_attribute(attr)) .transpose() } /// Returns all runtime interface members, with versions. 
-pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) - -> Result> -{ +pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) -> Result> { let mut functions: BTreeMap> = BTreeMap::new(); for item in get_trait_methods(trait_def) { @@ -258,25 +241,26 @@ pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) let version = get_item_version(item)?.unwrap_or(1); match functions.entry(name.clone()) { - Entry::Vacant(entry) => { entry.insert(RuntimeInterfaceFunction::new(version, item)); }, + Entry::Vacant(entry) => { + entry.insert(RuntimeInterfaceFunction::new(version, item)); + }, Entry::Occupied(mut entry) => { if let Some(existing_item) = entry.get().versions.get(&version) { - let mut err = Error::new( - item.span(), - "Duplicated version attribute", - ); + let mut err = Error::new(item.span(), "Duplicated version attribute"); err.combine(Error::new( existing_item.span(), "Previous version with the same number defined here", )); - return Err(err); + return Err(err) } let interface_item = entry.get_mut(); - if interface_item.latest_version < version { interface_item.latest_version = version; } + if interface_item.latest_version < version { + interface_item.latest_version = version; + } interface_item.versions.insert(version, item); - } + }, } } @@ -286,8 +270,11 @@ pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) if next_expected != *version { return Err(Error::new( item.span(), - format!("Unexpected version attribute: missing version '{}' for this function", next_expected), - )); + format!( + "Unexpected version attribute: missing version '{}' for this function", + next_expected + ), + )) } next_expected += 1; } diff --git a/substrate/primitives/runtime-interface/src/impls.rs b/substrate/primitives/runtime-interface/src/impls.rs index 4dd79aeccb39e46e15429b15b89bc24aab3f6745..40f8e90479f9557cf396f70d0f1c5c55d4fce248 100644 --- a/substrate/primitives/runtime-interface/src/impls.rs +++ b/substrate/primitives/runtime-interface/src/impls.rs @@ -17,14 +17,15 @@ //! Provides implementations for the runtime interface traits. 
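Two conventions in `utils.rs` above deserve a worked example: host symbols follow the `ext_<snake_case_trait>_<fn>_version_<n>` format string from `create_host_function_ident`, and version numbers must start at 1 and stay contiguous, or `get_runtime_interface` rejects the trait with the "missing version" error. A sketch:

use sp_runtime_interface::runtime_interface;

#[runtime_interface]
trait Answer {
	// Implicit version 1, exported to wasm as `ext_answer_get_version_1`
	// and compiled on the host as `get_version_1`.
	fn get() -> u8 {
		42
	}

	// Accepted only because version 1 exists; jumping straight to
	// `#[version(3)]` would fail with "missing version '2'".
	#[version(2)]
	fn get() -> u8 {
		43
	}
}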
-use crate::{ - RIType, Pointer, pass_by::{PassBy, Codec, Inner, PassByInner, Enum}, - util::{unpack_ptr_and_len, pack_ptr_and_len}, -}; #[cfg(feature = "std")] use crate::host::*; #[cfg(not(feature = "std"))] use crate::wasm::*; +use crate::{ + pass_by::{Codec, Enum, Inner, PassBy, PassByInner}, + util::{pack_ptr_and_len, unpack_ptr_and_len}, + Pointer, RIType, +}; #[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] use static_assertions::assert_eq_size; @@ -32,7 +33,7 @@ use static_assertions::assert_eq_size; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Result}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::{any::TypeId, mem, vec::Vec}; @@ -195,7 +196,7 @@ impl FromFFIValue for Vec { let len = len as usize; if len == 0 { - return Vec::new(); + return Vec::new() } let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; @@ -230,7 +231,8 @@ impl FromFFIValue for [T] { if TypeId::of::() == TypeId::of::() { Ok(unsafe { mem::transmute(vec) }) } else { - Ok(Vec::::decode(&mut &vec[..]).expect("Wasm to host values are encoded correctly; qed")) + Ok(Vec::::decode(&mut &vec[..]) + .expect("Wasm to host values are encoded correctly; qed")) } } } @@ -247,13 +249,11 @@ impl IntoPreallocatedFFIValue for [u8] { let (ptr, len) = unpack_ptr_and_len(allocated); if (len as usize) < self_instance.len() { - Err( - format!( - "Preallocated buffer is not big enough (given {} vs needed {})!", - len, - self_instance.len() - ) - ) + Err(format!( + "Preallocated buffer is not big enough (given {} vs needed {})!", + len, + self_instance.len() + )) } else { context.write_memory(Pointer::new(ptr), &self_instance) } @@ -367,7 +367,10 @@ impl PassBy for Option { #[impl_trait_for_tuples::impl_for_tuples(30)] #[tuple_types_no_default_trait_bound] -impl PassBy for Tuple where Self: codec::Codec { +impl PassBy for Tuple +where + Self: codec::Codec, +{ type PassBy = Codec; } @@ -511,7 +514,8 @@ macro_rules! for_u128_i128 { type SelfInstance = $type; fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<$type> { - let data = context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; + let data = + context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; let mut res = [0u8; mem::size_of::<$type>()]; res.copy_from_slice(&data); Ok(<$type>::from_le_bytes(res)) @@ -526,7 +530,7 @@ macro_rules! for_u128_i128 { Ok(addr.into()) } } - } + }; } for_u128_i128!(u128); diff --git a/substrate/primitives/runtime-interface/src/lib.rs b/substrate/primitives/runtime-interface/src/lib.rs index 93b4a8db87e9d609243ae8d0355e1773a90725ca..53b4270fe8a60fb54dbef3edde27408c4a5bf1ff 100644 --- a/substrate/primitives/runtime-interface/src/lib.rs +++ b/substrate/primitives/runtime-interface/src/lib.rs @@ -292,28 +292,28 @@ pub use sp_std; /// the case when that would create a circular dependency. You usually _do not_ want to add this /// flag, as tracing doesn't cost you anything by default anyways (it is added as a no-op) but is /// super useful for debugging later. 
-/// pub use sp_runtime_interface_proc_macro::runtime_interface; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_externalities::{ - set_and_run_with_externalities, with_externalities, Externalities, ExternalitiesExt, ExtensionStore, + set_and_run_with_externalities, with_externalities, ExtensionStore, Externalities, + ExternalitiesExt, }; #[doc(hidden)] pub use codec; -pub(crate) mod impls; #[cfg(feature = "std")] pub mod host; +pub(crate) mod impls; +pub mod pass_by; #[cfg(any(not(feature = "std"), doc))] pub mod wasm; -pub mod pass_by; mod util; -pub use util::{unpack_ptr_and_len, pack_ptr_and_len}; +pub use util::{pack_ptr_and_len, unpack_ptr_and_len}; /// Something that can be used by the runtime interface as type to communicate between wasm and the /// host. diff --git a/substrate/primitives/runtime-interface/src/pass_by.rs b/substrate/primitives/runtime-interface/src/pass_by.rs index 69485a1a2873f682377f59802c5f32d09f29cac9..0535d1ca8d7fc7c6f1a96c47452ab981393ba3e9 100644 --- a/substrate/primitives/runtime-interface/src/pass_by.rs +++ b/substrate/primitives/runtime-interface/src/pass_by.rs @@ -20,7 +20,10 @@ //! //! [`Codec`], [`Inner`] and [`Enum`] are the provided strategy implementations. -use crate::{RIType, util::{unpack_ptr_and_len, pack_ptr_and_len}}; +use crate::{ + util::{pack_ptr_and_len, unpack_ptr_and_len}, + RIType, +}; #[cfg(feature = "std")] use crate::host::*; @@ -30,7 +33,7 @@ use crate::wasm::*; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Pointer, Result}; -use sp_std::{marker::PhantomData, convert::TryFrom}; +use sp_std::{convert::TryFrom, marker::PhantomData}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; @@ -119,18 +122,12 @@ pub trait PassByImpl: RIType { /// Convert the given instance to the ffi value. /// /// For more information see: [`crate::host::IntoFFIValue::into_ffi_value`] - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result; + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result; /// Create `T` from the given ffi value. /// /// For more information see: [`crate::host::FromFFIValue::from_ffi_value`] - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result; + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result; } /// Something that provides a strategy for passing a type between wasm and the host. 
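The `PassByImpl` trait above is the plumbing behind the three ready-made strategies, normally chosen with a derive. A sketch of all three on user types, assuming `parity-scale-codec` for the `Encode`/`Decode` derives:

use codec::{Decode, Encode};
use sp_runtime_interface::pass_by::{PassByCodec, PassByEnum, PassByInner};

// Codec: SCALE-encode into wasm memory and pass a packed (ptr, len) u64
// (length in the high 32 bits, pointer in the low 32), which is what the
// `Codec` impl in the next hunk does via `pack_ptr_and_len`.
#[derive(Encode, Decode, PassByCodec)]
struct Payload {
	data: Vec<u8>,
}

// Inner: delegate to the single field's own FFI representation.
#[derive(PassByInner)]
struct Wrapped(u32);

// Enum: pass the unit-variant discriminant as a u8, which is why the
// derive only accepts unit variants and at most 256 of them.
#[derive(Clone, Copy, PassByEnum)]
enum Mode {
	Fast,
	Safe,
}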
@@ -220,10 +217,7 @@ pub struct Codec(PhantomData); #[cfg(feature = "std")] impl PassByImpl for Codec { - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result { + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result { let vec = instance.encode(); let ptr = context.allocate_memory(vec.len() as u32)?; context.write_memory(ptr, &vec)?; @@ -231,14 +225,10 @@ impl PassByImpl for Codec { Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32)) } - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { let (ptr, len) = unpack_ptr_and_len(arg); let vec = context.read_memory(Pointer::new(ptr), len)?; - T::decode(&mut &vec[..]) - .map_err(|e| format!("Could not decode value from wasm: {}", e)) + T::decode(&mut &vec[..]).map_err(|e| format!("Could not decode value from wasm: {}", e)) } } @@ -330,35 +320,31 @@ pub struct Inner, I: RIType>(PhantomData<(T, I)>); #[cfg(feature = "std")] impl, I: RIType> PassByImpl for Inner - where I: IntoFFIValue + FromFFIValue +where + I: IntoFFIValue + FromFFIValue, { - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result { + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result { instance.into_inner().into_ffi_value(context) } - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { I::from_ffi_value(context, arg).map(T::from_inner) } } #[cfg(not(feature = "std"))] impl, I: RIType> PassByImpl for Inner - where I: IntoFFIValue + FromFFIValue +where + I: IntoFFIValue + FromFFIValue, { type Owned = I::Owned; fn into_ffi_value(instance: &T) -> WrappedFFIValue { - instance.inner().into_ffi_value() + instance.inner().into_ffi_value() } fn from_ffi_value(arg: Self::FFIType) -> T { - T::from_inner(I::from_ffi_value(arg)) + T::from_inner(I::from_ffi_value(arg)) } } @@ -415,17 +401,11 @@ pub struct Enum + TryFrom>(PhantomData); #[cfg(feature = "std")] impl + TryFrom> PassByImpl for Enum { - fn into_ffi_value( - instance: T, - _: &mut dyn FunctionContext, - ) -> Result { + fn into_ffi_value(instance: T, _: &mut dyn FunctionContext) -> Result { Ok(instance.into()) } - fn from_ffi_value( - _: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { + fn from_ffi_value(_: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { T::try_from(arg).map_err(|_| format!("Invalid enum discriminant: {}", arg)) } } diff --git a/substrate/primitives/runtime-interface/src/wasm.rs b/substrate/primitives/runtime-interface/src/wasm.rs index 387d6901e2f2555fc93dd03aa8a4d0a70f5fcdbf..28613f81a68b27f9fe9bb1c3a61320cd6ccf598a 100644 --- a/substrate/primitives/runtime-interface/src/wasm.rs +++ b/substrate/primitives/runtime-interface/src/wasm.rs @@ -108,7 +108,7 @@ impl ExchangeableFunction { /// # Returns /// /// Returns the original implementation wrapped in [`RestoreImplementation`]. 
- pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { + pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { if let ExchangeableFunctionState::Replaced = self.0.get().1 { panic!("Trying to replace an already replaced implementation!") } @@ -139,6 +139,7 @@ pub struct RestoreImplementation(&'static ExchangeableFunctio impl Drop for RestoreImplementation { fn drop(&mut self) { - self.0.restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); + self.0 + .restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); } } diff --git a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 0a7e2b49bbbbb45aebe88254136bee1c1458e199..8c864fc90e0363ce7ddc1ce2288e684f30fe5f9b 100644 --- a/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -29,8 +29,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only \ + supported with the flag disabled.", + ) } /// This function is not used, but we require it for the compiler to include `sp-io`. diff --git a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs index 65a0e5c5ca4478f698aa0185ca43bf6cffac8bc7..72acdd4ff8d6eea8f8e6b67c538c34f4e5a5290c 100644 --- a/substrate/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/substrate/primitives/runtime-interface/test-wasm/src/lib.rs @@ -22,7 +22,7 @@ use sp_runtime_interface::runtime_interface; #[cfg(not(feature = "std"))] -use sp_std::{prelude::*, mem, convert::TryFrom}; +use sp_std::{convert::TryFrom, mem, prelude::*}; use sp_core::{sr25519::Public, wasm_export_functions}; @@ -33,8 +33,10 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only \ + supported with the flag disabled.", + ) } /// Used in the `test_array_as_mutable_reference` test. 
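`replace_implementation` above returns a `RestoreImplementation` guard and panics on double replacement; the guard's `Drop` puts the original function back, which is what lets tests swap a host function temporarily. A minimal standalone model of that guard pattern in plain Rust; not the real type, which also tracks a `Replaced` state and lives in a static:

use std::cell::Cell;

struct Exchangeable {
	current: Cell<fn() -> u32>,
}

impl Exchangeable {
	fn new(f: fn() -> u32) -> Self {
		Self { current: Cell::new(f) }
	}

	/// Swap in `new_impl`; the returned guard restores the original on drop.
	fn replace_implementation(&self, new_impl: fn() -> u32) -> Restore<'_> {
		Restore(self, self.current.replace(new_impl))
	}

	fn call(&self) -> u32 {
		(self.current.get())()
	}
}

struct Restore<'a>(&'a Exchangeable, fn() -> u32);

impl Drop for Restore<'_> {
	fn drop(&mut self) {
		// Mirrors `restore_orig_implementation` in the hunk above.
		self.0.current.set(self.1);
	}
}

fn main() {
	let ex = Exchangeable::new(|| 1);
	{
		let _restore = ex.replace_implementation(|| 2);
		assert_eq!(ex.call(), 2);
	}
	assert_eq!(ex.call(), 1);
}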
diff --git a/substrate/primitives/runtime-interface/test/src/lib.rs b/substrate/primitives/runtime-interface/test/src/lib.rs index a021a93939a10ee401f3ce5c8f969218800f0670..82c50fffeb8d749d0ecc9a77fa3acc52d10e59bf 100644 --- a/substrate/primitives/runtime-interface/test/src/lib.rs +++ b/substrate/primitives/runtime-interface/test/src/lib.rs @@ -20,13 +20,16 @@ use sp_runtime_interface::*; -use sp_runtime_interface_test_wasm::{wasm_binary_unwrap, test_api::HostFunctions}; +use sp_runtime_interface_test_wasm::{test_api::HostFunctions, wasm_binary_unwrap}; use sp_runtime_interface_test_wasm_deprecated::wasm_binary_unwrap as wasm_binary_deprecated_unwrap; -use sp_wasm_interface::HostFunctions as HostFunctionsT; use sc_executor_common::runtime_blob::RuntimeBlob; +use sp_wasm_interface::HostFunctions as HostFunctionsT; -use std::{collections::HashSet, sync::{Arc, Mutex}}; +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; type TestExternalities = sp_state_machine::TestExternalities; @@ -82,7 +85,10 @@ fn test_set_storage() { #[test] fn test_return_value_into_mutable_reference() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_return_value_into_mutable_reference"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_return_value_into_mutable_reference", + ); } #[test] @@ -102,7 +108,8 @@ fn test_return_input_public_key() { #[test] fn host_function_not_found() { - let err = call_wasm_method_with_result::<()>(&wasm_binary_unwrap()[..], "test_return_data").unwrap_err(); + let err = call_wasm_method_with_result::<()>(&wasm_binary_unwrap()[..], "test_return_data") + .unwrap_err(); assert!(err.contains("Instantiation: Export ")); assert!(err.contains(" not found")); @@ -111,41 +118,56 @@ fn host_function_not_found() { #[test] #[should_panic(expected = "Invalid utf8 data provided")] fn test_invalid_utf8_data_should_return_an_error() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_invalid_utf8_data_should_return_an_error"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_invalid_utf8_data_should_return_an_error", + ); } #[test] fn test_overwrite_native_function_implementation() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_overwrite_native_function_implementation"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_overwrite_native_function_implementation", + ); } #[test] fn test_u128_i128_as_parameter_and_return_value() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_u128_i128_as_parameter_and_return_value"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_u128_i128_as_parameter_and_return_value", + ); } #[test] fn test_vec_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_vec_return_value_memory_is_freed"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_vec_return_value_memory_is_freed", + ); } #[test] fn test_encoded_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_encoded_return_value_memory_is_freed"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_encoded_return_value_memory_is_freed", + ); } #[test] fn test_array_return_value_memory_is_freed() { - call_wasm_method::(&wasm_binary_unwrap()[..], "test_array_return_value_memory_is_freed"); + call_wasm_method::( + &wasm_binary_unwrap()[..], + "test_array_return_value_memory_is_freed", + ); } #[test] fn test_versionining_with_new_host_works() { // We call to the new wasm binary with new host function. 
- call_wasm_method::( - &wasm_binary_unwrap()[..], - "test_versionning_works", - ); + call_wasm_method::(&wasm_binary_unwrap()[..], "test_versionning_works"); // we call to the old wasm binary with a new host functions // old versions of host functions should be called and test should be ok! @@ -158,7 +180,7 @@ fn test_versionining_with_new_host_works() { #[test] fn test_tracing() { use std::fmt; - use tracing::{span::Id as SpanId}; + use tracing::span::Id as SpanId; use tracing_core::field::{Field, Visit}; #[derive(Clone)] @@ -166,9 +188,8 @@ fn test_tracing() { struct FieldConsumer(&'static str, Option); impl Visit for FieldConsumer { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - if field.name() == self.0 { + if field.name() == self.0 { self.1 = Some(format!("{:?}", value)) } } @@ -180,14 +201,16 @@ fn test_tracing() { } impl tracing::subscriber::Subscriber for TracingSubscriber { - fn enabled(&self, _: &tracing::Metadata) -> bool { true } + fn enabled(&self, _: &tracing::Metadata) -> bool { + true + } fn new_span(&self, span: &tracing::span::Attributes) -> tracing::Id { let mut inner = self.0.lock().unwrap(); let id = SpanId::from_u64((inner.spans.len() + 1) as _); let mut f = FieldConsumer("name", None); span.record(&mut f); - inner.spans.insert(f.1.unwrap_or_else(||span.metadata().name().to_owned())); + inner.spans.insert(f.1.unwrap_or_else(|| span.metadata().name().to_owned())); id } diff --git a/substrate/primitives/runtime/src/curve.rs b/substrate/primitives/runtime/src/curve.rs index 326ababcf5d4f343125d30a7f5cf17c30a1e85b2..72d64cf4b8e17547fa159f085cd20488abacae00 100644 --- a/substrate/primitives/runtime/src/curve.rs +++ b/substrate/primitives/runtime/src/curve.rs @@ -17,7 +17,10 @@ //! Provides some utilities to define a piecewise linear function. -use crate::{Perbill, traits::{AtLeast32BitUnsigned, SaturatedConversion}}; +use crate::{ + traits::{AtLeast32BitUnsigned, SaturatedConversion}, + Perbill, +}; use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. @@ -29,14 +32,15 @@ pub struct PiecewiseLinear<'a> { pub maximum: Perbill, } -fn abs_sub + Clone>(a: N, b: N) -> N where { +fn abs_sub + Clone>(a: N, b: N) -> N where { a.clone().max(b.clone()) - a.min(b) } impl<'a> PiecewiseLinear<'a> { /// Compute `f(n/d)*d` with `n <= d`. This is useful to avoid loss of precision. - pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N where - N: AtLeast32BitUnsigned + Clone + pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N + where + N: AtLeast32BitUnsigned + Clone, { let n = n.min(d.clone()); @@ -44,8 +48,7 @@ impl<'a> PiecewiseLinear<'a> { return N::zero() } - let next_point_index = self.points.iter() - .position(|p| n < p.0 * d.clone()); + let next_point_index = self.points.iter().position(|p| n < p.0 * d.clone()); let (prev, next) = if let Some(next_point_index) = next_point_index { if let Some(previous_point_index) = next_point_index.checked_sub(1) { @@ -80,7 +83,8 @@ impl<'a> PiecewiseLinear<'a> { // This is guaranteed not to overflow on whatever values nor lose precision. // `q` must be superior to zero. 
fn multiply_by_rational_saturating(value: N, p: u32, q: u32) -> N - where N: AtLeast32BitUnsigned + Clone +where + N: AtLeast32BitUnsigned + Clone, { let q = q.max(1); @@ -112,17 +116,14 @@ fn test_multiply_by_rational_saturating() { for value in 0..=div { for p in 0..=div { for q in 1..=div { - let value: u64 = (value as u128 * u64::MAX as u128 / div as u128) - .try_into().unwrap(); - let p = (p as u64 * u32::MAX as u64 / div as u64) - .try_into().unwrap(); - let q = (q as u64 * u32::MAX as u64 / div as u64) - .try_into().unwrap(); + let value: u64 = + (value as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); + let p = (p as u64 * u32::MAX as u64 / div as u64).try_into().unwrap(); + let q = (q as u64 * u32::MAX as u64 / div as u64).try_into().unwrap(); assert_eq!( multiply_by_rational_saturating(value, p, q), - (value as u128 * p as u128 / q as u128) - .try_into().unwrap_or(u64::MAX) + (value as u128 * p as u128 / q as u128).try_into().unwrap_or(u64::MAX) ); } } @@ -153,10 +154,8 @@ fn test_calculate_for_fraction_times_denominator() { let div = 100u32; for d in 0..=div { for n in 0..=d { - let d: u64 = (d as u128 * u64::MAX as u128 / div as u128) - .try_into().unwrap(); - let n: u64 = (n as u128 * u64::MAX as u128 / div as u128) - .try_into().unwrap(); + let d: u64 = (d as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); + let n: u64 = (n as u128 * u64::MAX as u128 / div as u128).try_into().unwrap(); let res = curve.calculate_for_fraction_times_denominator(n, d); let expected = formal_calculate_for_fraction_times_denominator(n, d); diff --git a/substrate/primitives/runtime/src/generic/block.rs b/substrate/primitives/runtime/src/generic/block.rs index af4f9e4521e3bd78f42e18469de5b6612f36125c..21a01933bc691190d6694b56f18ec47de8e34918 100644 --- a/substrate/primitives/runtime/src/generic/block.rs +++ b/substrate/primitives/runtime/src/generic/block.rs @@ -23,14 +23,16 @@ use std::fmt; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_std::prelude::*; -use sp_core::RuntimeDebug; -use crate::codec::{Codec, Encode, Decode}; -use crate::traits::{ - self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeMallocSizeOf, - NumberFor, +use crate::{ + codec::{Codec, Decode, Encode}, + traits::{ + self, Block as BlockT, Header as HeaderT, MaybeMallocSizeOf, MaybeSerialize, Member, + NumberFor, + }, + Justifications, }; -use crate::Justifications; +use sp_core::RuntimeDebug; +use sp_std::prelude::*; /// Something to identify a block. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 2c3392a1337997517220cabf642a960810b7671c..b2044a6cf74fd10f124a85d7f75e70116e5b734b 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,11 +18,13 @@ //! Generic implementation of an extrinsic that has passed the verification //! stage. 
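The curve hunks above pin down the contract: `calculate_for_fraction_times_denominator(n, d)` returns `f(n/d) * d` in pure integer math, interpolating between points with `multiply_by_rational_saturating`. A small concrete use; the struct fields are public, so the literal below should be representative:

use sp_runtime::{curve::PiecewiseLinear, Perbill};

fn main() {
	// One straight segment from f(0) = 10% to f(1/2) = 20%.
	let curve = PiecewiseLinear {
		points: &[
			(Perbill::from_percent(0), Perbill::from_percent(10)),
			(Perbill::from_percent(50), Perbill::from_percent(20)),
		],
		maximum: Perbill::from_percent(20),
	};
	// f(250/1000) = 15%, so the call yields 15% of 1000 without ever
	// forming the fraction 250/1000 itself.
	assert_eq!(curve.calculate_for_fraction_times_denominator(250u64, 1000u64), 150);
}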
-use crate::traits::{ - self, Member, MaybeDisplay, SignedExtension, Dispatchable, DispatchInfoOf, PostDispatchInfoOf, - ValidateUnsigned, +use crate::{ + traits::{ + self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, + SignedExtension, ValidateUnsigned, + }, + transaction_validity::{TransactionSource, TransactionValidity}, }; -use crate::transaction_validity::{TransactionValidity, TransactionSource}; /// Definition of something that the external world might want to say; its /// existence implies that it has been checked and is good, particularly with @@ -37,12 +39,11 @@ pub struct CheckedExtrinsic { pub function: Call, } -impl traits::Applyable for - CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, + Call: Member + Dispatchable, + Extra: SignedExtension, Origin: From>, { type Call = Call; @@ -64,7 +65,7 @@ where } } - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, diff --git a/substrate/primitives/runtime/src/generic/digest.rs b/substrate/primitives/runtime/src/generic/digest.rs index 8594393c7cdea3924cc630d94d3995bf0fa08fb7..195bf1cbe5da520138ba3f97d0e43d9f7c049dda 100644 --- a/substrate/primitives/runtime/src/generic/digest.rs +++ b/substrate/primitives/runtime/src/generic/digest.rs @@ -22,8 +22,10 @@ use serde::{Deserialize, Serialize}; use sp_std::prelude::*; -use crate::ConsensusEngineId; -use crate::codec::{Decode, Encode, Input, Error}; +use crate::{ + codec::{Decode, Encode, Error, Input}, + ConsensusEngineId, +}; use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; /// Generic header digest. @@ -40,7 +42,7 @@ pub struct Digest { impl Default for Digest { fn default() -> Self { - Self { logs: Vec::new(), } + Self { logs: Vec::new() } } } @@ -61,12 +63,18 @@ impl Digest { } /// Get reference to the first digest item that matches the passed predicate. - pub fn log) -> Option<&T>>(&self, predicate: F) -> Option<&T> { + pub fn log) -> Option<&T>>( + &self, + predicate: F, + ) -> Option<&T> { self.logs().iter().find_map(predicate) } /// Get a conversion of the first digest item that successfully converts using the function. 
- pub fn convert_first) -> Option>(&self, predicate: F) -> Option { + pub fn convert_first) -> Option>( + &self, + predicate: F, + ) -> Option { self.logs().iter().find_map(predicate) } } @@ -132,16 +140,18 @@ pub enum ChangesTrieSignal { #[cfg(feature = "std")] impl serde::Serialize for DigestItem { - fn serialize(&self, seq: S) -> Result where S: serde::Serializer { - self.using_encoded(|bytes| { - sp_core::bytes::serialize(bytes, seq) - }) + fn serialize(&self, seq: S) -> Result + where + S: serde::Serializer, + { + self.using_encoded(|bytes| sp_core::bytes::serialize(bytes, seq)) } } #[cfg(feature = "std")] impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { - fn deserialize(de: D) -> Result where + fn deserialize(de: D) -> Result + where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; @@ -297,9 +307,7 @@ impl Decode for DigestItem { fn decode(input: &mut I) -> Result { let item_type: DigestItemType = Decode::decode(input)?; match item_type { - DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot( - Decode::decode(input)?, - )), + DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot(Decode::decode(input)?)), DigestItemType::PreRuntime => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::PreRuntime(vals.0, vals.1)) @@ -307,17 +315,14 @@ impl Decode for DigestItem { DigestItemType::Consensus => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::Consensus(vals.0, vals.1)) - } + }, DigestItemType::Seal => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::Seal(vals.0, vals.1)) }, - DigestItemType::ChangesTrieSignal => Ok(Self::ChangesTrieSignal( - Decode::decode(input)?, - )), - DigestItemType::Other => Ok(Self::Other( - Decode::decode(input)?, - )), + DigestItemType::ChangesTrieSignal => + Ok(Self::ChangesTrieSignal(Decode::decode(input)?)), + DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), } } } @@ -376,9 +381,10 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { match (id, self) { (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) | - (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | - (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) - if v == w => Some(&s[..]), + (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | + (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) + if v == w => + Some(&s[..]), (OpaqueDigestItemId::Other, &Self::Other(s)) => Some(&s[..]), _ => None, } @@ -395,8 +401,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Returns `None` if this isn't a seal item, the `id` doesn't match or when the decoding fails. pub fn seal_try_to(&self, id: &ConsensusEngineId) -> Option { match self { - Self::Seal(v, s) if *v == id => - Decode::decode(&mut &s[..]).ok(), + Self::Seal(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), _ => None, } } @@ -407,8 +412,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// when the decoding fails. pub fn consensus_try_to(&self, id: &ConsensusEngineId) -> Option { match self { - Self::Consensus(v, s) if *v == id => - Decode::decode(&mut &s[..]).ok(), + Self::Consensus(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), _ => None, } } @@ -419,8 +423,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// when the decoding fails. 
pub fn pre_runtime_try_to(&self, id: &ConsensusEngineId) -> Option { match self { - Self::PreRuntime(v, s) if *v == id => - Decode::decode(&mut &s[..]).ok(), + Self::PreRuntime(v, s) if *v == id => Decode::decode(&mut &s[..]).ok(), _ => None, } } @@ -482,7 +485,7 @@ mod tests { logs: vec![ DigestItem::ChangesTrieRoot(4), DigestItem::Other(vec![1, 2, 3]), - DigestItem::Seal(*b"test", vec![1, 2, 3]) + DigestItem::Seal(*b"test", vec![1, 2, 3]), ], }; diff --git a/substrate/primitives/runtime/src/generic/era.rs b/substrate/primitives/runtime/src/generic/era.rs index 83a9f22afe5d62c2a2b07e97263a42f687d32889..80ac46125b3615ed8f0505f8094551cb37c78b35 100644 --- a/substrate/primitives/runtime/src/generic/era.rs +++ b/substrate/primitives/runtime/src/generic/era.rs @@ -18,9 +18,9 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Input, Output, Error}; +use crate::codec::{Decode, Encode, Error, Input, Output}; /// Era period pub type Period = u64; @@ -47,15 +47,13 @@ pub enum Era { Mortal(Period, Phase), } -/* - * E.g. with period == 4: - * 0 10 20 30 40 - * 0123456789012345678901234567890123456789012 - * |...| - * authored -/ \- expiry - * phase = 1 - * n = Q(current - phase, period) + phase - */ +// E.g. with period == 4: +// 0 10 20 30 40 +// 0123456789012345678901234567890123456789012 +// |...| +// authored -/ \- expiry +// phase = 1 +// n = Q(current - phase, period) + phase impl Era { /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) /// and a block number on which it should start (or, for long periods, be shortly after the start). @@ -64,10 +62,7 @@ impl Era { /// does not exceed `BlockHashCount` parameter passed to `system` module, since that /// prunes old blocks and renders transactions immediately invalid. 
pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two() - .unwrap_or(1 << 16) - .max(4) - .min(1 << 16); + let period = period.checked_next_power_of_two().unwrap_or(1 << 16).max(4).min(1 << 16); let phase = current % period; let quantize_factor = (period >> 12).max(1); let quantized_phase = phase / quantize_factor * quantize_factor; @@ -109,9 +104,10 @@ impl Encode for Era { Self::Immortal => output.push_byte(0), Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; + let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | + ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); - } + }, } } } @@ -153,7 +149,7 @@ mod tests { assert!(e.is_immortal()); assert_eq!(e.encode(), vec![0u8]); - assert_eq!(e, Era::decode(&mut&[0u8][..]).unwrap()); + assert_eq!(e, Era::decode(&mut &[0u8][..]).unwrap()); } #[test] @@ -163,7 +159,7 @@ mod tests { let expected = vec![5 + 42 % 16 * 16, 42 / 16]; assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); } #[test] @@ -172,7 +168,7 @@ mod tests { let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); } #[test] diff --git a/substrate/primitives/runtime/src/generic/header.rs b/substrate/primitives/runtime/src/generic/header.rs index def761b201cebef89c334df73ba123e750006305..07b70337076b4f2fdf2f831e594e97ed9822cf47 100644 --- a/substrate/primitives/runtime/src/generic/header.rs +++ b/substrate/primitives/runtime/src/generic/header.rs @@ -17,20 +17,18 @@ //! Generic implementation of a block header. +use crate::{ + codec::{Codec, Decode, Encode, EncodeAsRef, Error, HasCompact, Input, Output}, + generic::Digest, + traits::{ + self, AtLeast32BitUnsigned, Hash as HashT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerialize, + MaybeSerializeDeserialize, Member, SimpleBitOps, + }, +}; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef, Error}; -use crate::traits::{ - self, Member, AtLeast32BitUnsigned, SimpleBitOps, Hash as HashT, - MaybeSerializeDeserialize, MaybeSerialize, MaybeDisplay, - MaybeMallocSizeOf, -}; -use crate::generic::Digest; use sp_core::U256; -use sp_std::{ - convert::TryFrom, - fmt::Debug, -}; +use sp_std::{convert::TryFrom, fmt::Debug}; /// Abstraction over a block header for a substrate chain. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] @@ -41,9 +39,10 @@ pub struct Header + TryFrom, Hash: HashT> { /// The parent hash. pub parent_hash: Hash::Output, /// The block number. 
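Aside: the mortal-era encoding reflowed above packs `log2(period) - 1` (clamped to `1..=15`) into the low four bits and the quantized phase into the remaining twelve. A standalone sketch that reproduces the `implicit_era` test vector (assumes `period` is already a power of two, as `Era::mortal` guarantees):

```rust
// Mirrors `Era::mortal(64, 42)`: period 64 gives low nibble 5, and phase 42
// is stored un-quantized because period >> 12 == 0.
fn encode_mortal(period: u64, phase: u64) -> [u8; 2] {
    let quantize_factor = (period >> 12).max(1);
    let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 |
        ((phase / quantize_factor) << 4) as u16;
    encoded.to_le_bytes() // SCALE encodes a u16 little-endian
}

fn main() {
    // Same bytes the `implicit_era` test asserts: [5 + 42 % 16 * 16, 42 / 16].
    assert_eq!(encode_mortal(64, 42), [5 + (42 % 16) * 16, 42 / 16]);
}
```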
- #[cfg_attr(feature = "std", serde( - serialize_with = "serialize_number", - deserialize_with = "deserialize_number"))] + #[cfg_attr( + feature = "std", + serde(serialize_with = "serialize_number", deserialize_with = "deserialize_number") + )] pub number: Number, /// The state trie merkle root pub state_root: Hash::Output, @@ -71,21 +70,27 @@ where #[cfg(feature = "std")] pub fn serialize_number + TryFrom>( - val: &T, s: S, -) -> Result where S: serde::Serializer { + val: &T, + s: S, +) -> Result +where + S: serde::Serializer, +{ let u256: U256 = (*val).into(); serde::Serialize::serialize(&u256, s) } #[cfg(feature = "std")] -pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>( - d: D, -) -> Result where D: serde::Deserializer<'a> { +pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>(d: D) -> Result +where + D: serde::Deserializer<'a>, +{ let u256: U256 = serde::Deserialize::deserialize(d)?; TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) } -impl Decode for Header where +impl Decode for Header +where Number: HasCompact + Copy + Into + TryFrom, Hash: HashT, Hash::Output: Decode, @@ -101,51 +106,92 @@ impl Decode for Header where } } -impl Encode for Header where +impl Encode for Header +where Number: HasCompact + Copy + Into + TryFrom, Hash: HashT, Hash::Output: Encode, { fn encode_to(&self, dest: &mut T) { self.parent_hash.encode_to(dest); - <<::Type as EncodeAsRef<_>>::RefType>::from(&self.number).encode_to(dest); + <<::Type as EncodeAsRef<_>>::RefType>::from(&self.number) + .encode_to(dest); self.state_root.encode_to(dest); self.extrinsics_root.encode_to(dest); self.digest.encode_to(dest); } } -impl codec::EncodeLike for Header where +impl codec::EncodeLike for Header +where Number: HasCompact + Copy + Into + TryFrom, Hash: HashT, Hash::Output: Encode, -{} +{ +} -impl traits::Header for Header where - Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + MaybeDisplay + - AtLeast32BitUnsigned + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + - MaybeMallocSizeOf, +impl traits::Header for Header +where + Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + Copy + + Into + + TryFrom + + sp_std::str::FromStr + + MaybeMallocSizeOf, Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + Ord + - MaybeSerialize + Debug + MaybeDisplay + SimpleBitOps + Codec + MaybeMallocSizeOf, + Hash::Output: Default + + sp_std::hash::Hash + + Copy + + Member + + Ord + + MaybeSerialize + + Debug + + MaybeDisplay + + SimpleBitOps + + Codec + + MaybeMallocSizeOf, { type Number = Number; type Hash = ::Output; type Hashing = Hash; - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + 
self.state_root = root + } - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } - fn digest(&self) -> &Digest { &self.digest } + fn digest(&self) -> &Digest { + &self.digest + } fn digest_mut(&mut self) -> &mut Digest { #[cfg(feature = "std")] @@ -160,22 +206,24 @@ impl traits::Header for Header where parent_hash: Self::Hash, digest: Digest, ) -> Self { - Self { - number, - extrinsics_root, - state_root, - parent_hash, - digest, - } + Self { number, extrinsics_root, state_root, parent_hash, digest } } } -impl Header where - Number: Member + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32BitUnsigned + Codec + - Into + TryFrom, +impl Header +where + Number: Member + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + Into + + TryFrom, Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, - { + Hash::Output: + Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, +{ /// Convenience helper for computing the hash of the header without having /// to import the trait. pub fn hash(&self) -> Hash::Output { diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index c4b28a06c901f2ac4cbcfc998c64e60ef1074d1d..deaecd65e478a30319f395b1b42c2127098a917c 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -19,22 +19,22 @@ //! Generic implementations of Extrinsic/Header/Block. // end::description[] -mod unchecked_extrinsic; -mod era; -mod checked_extrinsic; -mod header; mod block; +mod checked_extrinsic; mod digest; +mod era; +mod header; #[cfg(test)] mod tests; +mod unchecked_extrinsic; -pub use self::unchecked_extrinsic::{UncheckedExtrinsic, SignedPayload}; -pub use self::era::{Era, Phase}; -pub use self::checked_extrinsic::CheckedExtrinsic; -pub use self::header::Header; -pub use self::block::{Block, SignedBlock, BlockId}; -pub use self::digest::{ - Digest, DigestItem, DigestItemRef, OpaqueDigestItemId, ChangesTrieSignal, +pub use self::{ + block::{Block, BlockId, SignedBlock}, + checked_extrinsic::CheckedExtrinsic, + digest::{ChangesTrieSignal, Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, + era::{Era, Phase}, + header::Header, + unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, }; use crate::codec::Encode; diff --git a/substrate/primitives/runtime/src/generic/tests.rs b/substrate/primitives/runtime/src/generic/tests.rs index ec31e7de48524c7f8ade94790394ca7aee3a59e0..095bcb717bb1100a9bc594ef30792bfd9d637a28 100644 --- a/substrate/primitives/runtime/src/generic/tests.rs +++ b/substrate/primitives/runtime/src/generic/tests.rs @@ -17,27 +17,23 @@ //! Tests for the generic implementations of Extrinsic/Header/Block. 
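With the `traits::Header` impl and re-exports consolidated above, constructing a generic header goes through the trait's `new`. A sketch (assuming `sp-runtime` as a dependency; exact generic parameters vary slightly across versions):

```rust
use sp_runtime::{
    generic::Header,
    traits::{BlakeTwo256, Header as HeaderT},
};

fn main() {
    let header: Header<u32, BlakeTwo256> = HeaderT::new(
        1,                  // number
        Default::default(), // extrinsics_root
        Default::default(), // state_root
        Default::default(), // parent_hash
        Default::default(), // digest
    );
    // Convenience helper from the impl above: hash without importing the trait.
    println!("{:?}", header.hash());
}
```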
+use super::DigestItem; use crate::codec::{Decode, Encode}; use sp_core::H256; -use super::DigestItem; #[test] fn system_digest_item_encoding() { let item = DigestItem::ChangesTrieRoot::(H256::default()); let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::ChangesTrieRoot - 2, - // trie root - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - ]); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::ChangesTrieRoot + 2, // trie root + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ] + ); let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); @@ -47,14 +43,15 @@ fn system_digest_item_encoding() { fn non_system_digest_item_encoding() { let item = DigestItem::Other::(vec![10, 20, 30]); let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::Other - 0, - // length of other data - 12, - // authorities - 10, 20, 30, - ]); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::Other + 0, // length of other data + 12, // authorities + 10, 20, 30, + ] + ); let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index d6164d0b51cc2b81b3f796d7f4bd91d3b933c182..68ab8447cfbcedddbd3df022194286f7a62bf2fa 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -17,18 +17,18 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. -use sp_std::{fmt, prelude::*}; -use sp_io::hashing::blake2_256; -use codec::{Decode, Encode, EncodeLike, Input, Error}; use crate::{ + generic::CheckedExtrinsic, traits::{ - self, Member, MaybeDisplay, SignedExtension, Checkable, Extrinsic, ExtrinsicMetadata, - IdentifyAccount, + self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, + SignedExtension, }, - generic::CheckedExtrinsic, - transaction_validity::{TransactionValidityError, InvalidTransaction}, + transaction_validity::{InvalidTransaction, TransactionValidityError}, OpaqueExtrinsic, }; +use codec::{Decode, Encode, EncodeLike, Error, Input}; +use sp_io::hashing::blake2_256; +use sp_std::{fmt, prelude::*}; /// Current version of the [`UncheckedExtrinsic`] format. const EXTRINSIC_VERSION: u8 = 4; @@ -38,7 +38,7 @@ const EXTRINSIC_VERSION: u8 = 4; #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { /// The signature, address, number of extrinsics have come before from /// the same signer and an era describing the longevity of this transaction, @@ -52,7 +52,7 @@ where impl parity_util_mem::MallocSizeOf for UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { fn size_of(&self, _ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { // Instantiated only in runtime. @@ -64,24 +64,13 @@ impl UncheckedExtrinsic { /// New instance of a signed extrinsic aka "transaction". 
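The reshaped tests above assert the wire format directly: one type byte, then the SCALE payload. The same bytes reproduced outside the test harness (assuming `parity-scale-codec` as `codec`):

```rust
use codec::Encode;
use sp_core::H256;
use sp_runtime::DigestItem;

fn main() {
    let item = DigestItem::Other::<H256>(vec![10, 20, 30]);
    // 0 = DigestItemType::Other, 12 = compact-encoded length 3, then the bytes.
    assert_eq!(item.encode(), vec![0, 12, 10, 20, 30]);
}
```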
- pub fn new_signed( - function: Call, - signed: Address, - signature: Signature, - extra: Extra - ) -> Self { - Self { - signature: Some((signed, signature, extra)), - function, - } + pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { + Self { signature: Some((signed, signature, extra)), function } } /// New instance of an unsigned extrinsic aka "inherent". pub fn new_unsigned(function: Call) -> Self { - Self { - signature: None, - function, - } + Self { signature: None, function } } } @@ -90,11 +79,7 @@ impl Extrinsic { type Call = Call; - type SignaturePayload = ( - Address, - Signature, - Extra, - ); + type SignaturePayload = (Address, Signature, Extra); fn is_signed(&self) -> Option { Some(self.signature.is_some()) @@ -109,18 +94,16 @@ impl Extrinsic } } -impl - Checkable -for - UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where Address: Member + MaybeDisplay, Call: Encode + Member, Signature: Member + traits::Verify, - ::Signer: IdentifyAccount, - Extra: SignedExtension, + ::Signer: IdentifyAccount, + Extra: SignedExtension, AccountId: Member + MaybeDisplay, - Lookup: traits::Lookup, + Lookup: traits::Lookup, { type Checked = CheckedExtrinsic; @@ -134,23 +117,17 @@ where } let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { - signed: Some((signed, extra)), - function, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, + CheckedExtrinsic { signed: Some((signed, extra)), function } }, + None => CheckedExtrinsic { signed: None, function: self.function }, }) } } impl ExtrinsicMetadata for UncheckedExtrinsic - where - Extra: SignedExtension, +where + Extra: SignedExtension, { const VERSION: u8 = EXTRINSIC_VERSION; type SignedExtensions = Extra; @@ -161,13 +138,10 @@ impl ExtrinsicMetadata /// Note that the payload that we sign to produce unchecked extrinsic signature /// is going to be different than the `SignaturePayload` - so the thing the extrinsic /// actually contains. 
-pub struct SignedPayload(( - Call, - Extra, - Extra::AdditionalSigned, -)); +pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); -impl SignedPayload where +impl SignedPayload +where Call: Encode, Extra: SignedExtension, { @@ -191,7 +165,8 @@ impl SignedPayload where } } -impl Encode for SignedPayload where +impl Encode for SignedPayload +where Call: Encode, Extra: SignedExtension, { @@ -213,10 +188,10 @@ impl EncodeLike for SignedPayload where Call: Encode, Extra: SignedExtension, -{} +{ +} -impl Decode - for UncheckedExtrinsic +impl Decode for UncheckedExtrinsic where Address: Decode, Signature: Decode, @@ -235,7 +210,7 @@ where let is_signed = version & 0b1000_0000 != 0; let version = version & 0b0111_1111; if version != EXTRINSIC_VERSION { - return Err("Invalid transaction version".into()); + return Err("Invalid transaction version".into()) } Ok(Self { @@ -245,8 +220,7 @@ where } } -impl Encode - for UncheckedExtrinsic +impl Encode for UncheckedExtrinsic where Address: Encode, Signature: Encode, @@ -260,10 +234,10 @@ where Some(s) => { v.push(EXTRINSIC_VERSION | 0b1000_0000); s.encode_to(v); - } + }, None => { v.push(EXTRINSIC_VERSION & 0b0111_1111); - } + }, } self.function.encode_to(v); }) @@ -277,22 +251,27 @@ where Signature: Encode, Call: Encode, Extra: SignedExtension, -{} +{ +} #[cfg(feature = "std")] impl serde::Serialize for UncheckedExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } #[cfg(feature = "std")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> serde::Deserialize<'a> - for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> + serde::Deserialize<'a> for UncheckedExtrinsic { - fn deserialize(de: D) -> Result where + fn deserialize(de: D) -> Result + where D: serde::Deserializer<'a>, { let r = sp_core::bytes::deserialize(de)?; @@ -327,21 +306,22 @@ where Extra: SignedExtension, { fn from(extrinsic: UncheckedExtrinsic) -> Self { - Self::from_bytes(extrinsic.encode().as_slice()) - .expect( - "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ - raw Vec encoding; qed" - ) + Self::from_bytes(extrinsic.encode().as_slice()).expect( + "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ + raw Vec encoding; qed", + ) } } #[cfg(test)] mod tests { use super::*; + use crate::{ + codec::{Decode, Encode}, + testing::TestSignature as TestSig, + traits::{IdentityLookup, SignedExtension}, + }; use sp_io::hashing::blake2_256; - use crate::codec::{Encode, Decode}; - use crate::traits::{SignedExtension, IdentityLookup}; - use crate::testing::TestSignature as TestSig; type TestContext = IdentityLookup; type TestAccountId = u64; @@ -359,7 +339,9 @@ mod tests { type AdditionalSigned = (); type Pre = (); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } } type Ex = UncheckedExtrinsic; @@ -378,7 +360,7 @@ mod tests { vec![0u8; 0], TEST_ACCOUNT, TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra + TestExtra, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); @@ -389,9 +371,11 @@ mod tests { let ux = Ex::new_signed( vec![0u8; 0], TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, 
(vec![0u8; 257], TestExtra) - .using_encoded(blake2_256)[..].to_owned()), - TestExtra + TestSig( + TEST_ACCOUNT, + (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + ), + TestExtra, ); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 6ad721079fb7690cc01a85185112621dfc973626..1baab238d8ccefaeb5d5e87af96762332162d97b 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -19,10 +19,10 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] - // to allow benchmarking #![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] extern crate test; +#[cfg(feature = "bench")] +extern crate test; #[doc(hidden)] pub use codec; @@ -41,22 +41,26 @@ pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; -use sp_std::prelude::*; -use sp_std::convert::TryFrom; -use sp_core::{crypto::{self, Public}, ed25519, sr25519, ecdsa, hash::{H256, H512}}; +use sp_core::{ + crypto::{self, Public}, + ecdsa, ed25519, + hash::{H256, H512}, + sr25519, +}; +use sp_std::{convert::TryFrom, prelude::*}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; pub mod curve; pub mod generic; +mod multiaddress; pub mod offchain; +pub mod runtime_logger; +mod runtime_string; #[cfg(feature = "std")] pub mod testing; pub mod traits; pub mod transaction_validity; -mod runtime_string; -mod multiaddress; -pub mod runtime_logger; pub use crate::runtime_string::*; @@ -64,25 +68,28 @@ pub use crate::runtime_string::*; pub use multiaddress::MultiAddress; /// Re-export these since they're only "kind of" generic. -pub use generic::{DigestItem, Digest}; +pub use generic::{Digest, DigestItem}; +pub use sp_application_crypto::{BoundToRuntimeAppPublic, RuntimeAppPublic}; /// Re-export this since it's part of the API of this crate. -pub use sp_core::{TypeId, crypto::{key_types, KeyTypeId, CryptoType, CryptoTypeId, AccountId32}}; -pub use sp_application_crypto::{RuntimeAppPublic, BoundToRuntimeAppPublic}; +pub use sp_core::{ + crypto::{key_types, AccountId32, CryptoType, CryptoTypeId, KeyTypeId}, + TypeId, +}; /// Re-export `RuntimeDebug`, to avoid dependency clutter. pub use sp_core::RuntimeDebug; +/// Re-export big_uint stuff. +pub use sp_arithmetic::biguint; +/// Re-export 128 bit helpers. +pub use sp_arithmetic::helpers_128bit; /// Re-export top-level arithmetic stuff. pub use sp_arithmetic::{ - PerThing, Perquintill, Perbill, Permill, Percent, PerU16, InnerOf, UpperOf, - Rational128, FixedI64, FixedI128, FixedU128, FixedPointNumber, FixedPointOperand, - traits::SaturatedConversion, + traits::SaturatedConversion, FixedI128, FixedI64, FixedPointNumber, FixedPointOperand, + FixedU128, InnerOf, PerThing, PerU16, Perbill, Percent, Permill, Perquintill, Rational128, + UpperOf, }; -/// Re-export 128 bit helpers. -pub use sp_arithmetic::helpers_128bit; -/// Re-export big_uint stuff. -pub use sp_arithmetic::biguint; pub use either::Either; @@ -119,7 +126,7 @@ impl Justifications { /// not inserted. 
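Aside: the extrinsic codec reshuffled above keeps the long-standing version-byte convention, with the signed flag in the high bit. A self-contained sketch:

```rust
// Low 7 bits: extrinsic format version; high bit: "carries a signature".
const EXTRINSIC_VERSION: u8 = 4;

fn version_byte(is_signed: bool) -> u8 {
    if is_signed {
        EXTRINSIC_VERSION | 0b1000_0000
    } else {
        EXTRINSIC_VERSION & 0b0111_1111
    }
}

fn split(byte: u8) -> (bool, u8) {
    (byte & 0b1000_0000 != 0, byte & 0b0111_1111)
}

fn main() {
    assert_eq!(split(version_byte(true)), (true, 4));
    assert_eq!(split(version_byte(false)), (false, 4));
}
```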
pub fn append(&mut self, justification: Justification) -> bool { if self.get(justification.0).is_some() { - return false; + return false } self.0.push(justification); true @@ -153,11 +160,11 @@ impl From for Justifications { } } -use traits::{Verify, Lazy}; +use traits::{Lazy, Verify}; -#[cfg(feature = "std")] -pub use serde::{Serialize, Deserialize, de::DeserializeOwned}; use crate::traits::IdentifyAccount; +#[cfg(feature = "std")] +pub use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Complex storage builder stuff. #[cfg(feature = "std")] @@ -169,10 +176,7 @@ pub trait BuildStorage { Ok(storage) } /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - ) -> Result<(), String>; + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String>; } /// Something that can build the genesis storage of a module. @@ -187,17 +191,14 @@ pub trait BuildModuleGenesisStorage: Sized { #[cfg(feature = "std")] impl BuildStorage for sp_core::storage::Storage { - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - )-> Result<(), String> { + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); for (k, other_map) in self.children_default.iter() { let k = k.clone(); if let Some(map) = storage.children_default.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { - return Err("Incompatible child info update".to_string()); + return Err("Incompatible child info update".to_string()) } } else { storage.children_default.insert(k, other_map.clone()); @@ -209,10 +210,7 @@ impl BuildStorage for sp_core::storage::Storage { #[cfg(feature = "std")] impl BuildStorage for () { - fn assimilate_storage( - &self, - _: &mut sp_core::storage::Storage, - ) -> Result<(), String> { + fn assimilate_storage(&self, _: &mut sp_core::storage::Storage) -> Result<(), String> { Err("`assimilate_storage` not implemented for `()`".into()) } } @@ -241,7 +239,11 @@ impl From for MultiSignature { impl TryFrom for ed25519::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Ed25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Ed25519(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -254,7 +256,11 @@ impl From for MultiSignature { impl TryFrom for sr25519::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Sr25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Sr25519(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -267,7 +273,11 @@ impl From for MultiSignature { impl TryFrom for ecdsa::Signature { type Error = (); fn try_from(m: MultiSignature) -> Result { - if let MultiSignature::Ecdsa(x) = m { Ok(x) } else { Err(()) } + if let MultiSignature::Ecdsa(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -333,7 +343,11 @@ impl From for MultiSigner { impl TryFrom for ed25519::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ed25519(x) = m { Ok(x) } else { Err(()) } + if let MultiSigner::Ed25519(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -346,7 +360,11 @@ impl From for MultiSigner { impl TryFrom for sr25519::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Sr25519(x) = m { Ok(x) } else { 
Err(()) } + if let MultiSigner::Sr25519(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -359,7 +377,11 @@ impl From for MultiSigner { impl TryFrom for ecdsa::Public { type Error = (); fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ecdsa(x) = m { Ok(x) } else { Err(()) } + if let MultiSigner::Ecdsa(x) = m { + Ok(x) + } else { + Err(()) + } } } @@ -378,17 +400,19 @@ impl Verify for MultiSignature { type Signer = MultiSigner; fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { match (self, signer) { - (Self::Ed25519(ref sig), who) => sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), - (Self::Sr25519(ref sig), who) => sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), + (Self::Ed25519(ref sig), who) => + sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), + (Self::Sr25519(ref sig), who) => + sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), (Self::Ecdsa(ref sig), who) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { Ok(pubkey) => - &sp_io::hashing::blake2_256(pubkey.as_ref()) - == >::as_ref(who), + &sp_io::hashing::blake2_256(pubkey.as_ref()) == + >::as_ref(who), _ => false, } - } + }, } } } @@ -404,10 +428,10 @@ impl Verify for AnySignature { let msg = msg.get(); sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) .map(|s| s.verify(msg, signer)) - .unwrap_or(false) - || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) - .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) - .unwrap_or(false) + .unwrap_or(false) || + ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) + .unwrap_or(false) } } @@ -443,7 +467,11 @@ pub type DispatchResultWithInfo = sp_std::result::Result where - Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +pub struct DispatchErrorWithPostInfo +where + Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { /// Additional information about the `Dispatchable` which is only known post dispatch. pub post_info: Info, @@ -485,22 +514,20 @@ impl DispatchError { /// Return the same error but without the attached message. 
pub fn stripped(self) -> Self { match self { - DispatchError::Module { index, error, message: Some(_) } - => DispatchError::Module { index, error, message: None }, + DispatchError::Module { index, error, message: Some(_) } => + DispatchError::Module { index, error, message: None }, m => m, } } } -impl From for DispatchErrorWithPostInfo where +impl From for DispatchErrorWithPostInfo +where T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + Default, - E: Into + E: Into, { fn from(error: E) -> Self { - Self { - post_info: Default::default(), - error: error.into(), - } + Self { post_info: Default::default(), error: error.into() } } } @@ -605,8 +632,9 @@ impl From for &'static str { } } -impl From> for &'static str where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +impl From> for &'static str +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { fn from(err: DispatchErrorWithPostInfo) -> &'static str { err.error.into() @@ -626,7 +654,7 @@ impl traits::Printable for DispatchError { if let Some(msg) = message { msg.print(); } - } + }, Self::ConsumerRemaining => "Consumer remaining".print(), Self::NoProviders => "No providers".print(), Self::Token(e) => { @@ -636,13 +664,14 @@ impl traits::Printable for DispatchError { Self::Arithmetic(e) => { "Arithmetic error: ".print(); <&'static str>::from(*e).print(); - } + }, } } } -impl traits::Printable for DispatchErrorWithPostInfo where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +impl traits::Printable for DispatchErrorWithPostInfo +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { fn print(&self) { self.error.print(); @@ -704,7 +733,8 @@ pub type DispatchOutcome = Result<(), DispatchError>; /// - The sender doesn't have enough funds to pay the transaction inclusion fee. Including such /// a transaction in the block doesn't make sense. /// - The extrinsic supplied a bad signature. This transaction won't become valid ever. -pub type ApplyExtrinsicResult = Result; +pub type ApplyExtrinsicResult = + Result; /// Same as `ApplyExtrinsicResult` but augmented with `PostDispatchInfo` on success. pub type ApplyExtrinsicResultWithInfo = @@ -715,7 +745,7 @@ pub type ApplyExtrinsicResultWithInfo = pub fn verify_encoded_lazy( sig: &V, item: &T, - signer: &::AccountId + signer: &::AccountId, ) -> bool { // The `Lazy` trait expresses something like `X: FnMut &'a T>`. // unfortunately this is a lifetime relationship that can't @@ -732,10 +762,7 @@ pub fn verify_encoded_lazy( } } - sig.verify( - LazyEncode { inner: || item.encode(), encoded: None }, - signer, - ) + sig.verify(LazyEncode { inner: || item.encode(), encoded: None }, signer) } /// Checks that `$x` is equal to `$y` with an error rate of `$error`. 
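`verify_encoded_lazy`, compacted above, encodes the item only when the verifier asks for the bytes. A sketch pairing it with a `MultiSignature` (assuming std builds of `sp-core`/`sp-runtime`; `//Alice` is the usual dev seed):

```rust
use codec::Encode;
use sp_core::{sr25519, Pair};
use sp_runtime::{verify_encoded_lazy, AccountId32, MultiSignature};

fn main() {
    let pair = sr25519::Pair::from_string("//Alice", None).expect("static dev seed is valid");
    let item = (42u32, b"payload".to_vec());
    let sig: MultiSignature = pair.sign(&item.encode()).into();
    let who: AccountId32 = pair.public().into();
    // Dispatches through the Sr25519 arm of the `Verify` match above.
    assert!(verify_encoded_lazy(&sig, &item, &who));
}
```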
@@ -802,14 +829,20 @@ impl sp_std::fmt::Debug for OpaqueExtrinsic { #[cfg(feature = "std")] impl ::serde::Serialize for OpaqueExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { codec::Encode::using_encoded(&self.0, |bytes| ::sp_core::bytes::serialize(bytes, seq)) } } #[cfg(feature = "std")] impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { - fn deserialize(de: D) -> Result where D: ::serde::Deserializer<'a> { + fn deserialize(de: D) -> Result + where + D: ::serde::Deserializer<'a>, + { let r = ::sp_core::bytes::deserialize(de)?; Decode::decode(&mut &r[..]) .map_err(|e| ::serde::de::Error::custom(format!("Decode error: {}", e))) @@ -881,7 +914,7 @@ impl TransactionOutcome { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode}; + use codec::{Decode, Encode}; use sp_core::crypto::Pair; #[test] @@ -892,22 +925,11 @@ mod tests { #[test] fn dispatch_error_encoding() { - let error = DispatchError::Module { - index: 1, - error: 2, - message: Some("error message"), - }; + let error = DispatchError::Module { index: 1, error: 2, message: Some("error message") }; let encoded = error.encode(); let decoded = DispatchError::decode(&mut &encoded[..]).unwrap(); assert_eq!(encoded, vec![3, 1, 2]); - assert_eq!( - decoded, - DispatchError::Module { - index: 1, - error: 2, - message: None, - }, - ); + assert_eq!(decoded, DispatchError::Module { index: 1, error: 2, message: None },); } #[test] @@ -947,7 +969,7 @@ mod tests { // Ignores `message` field in `Module` variant. assert_eq!( Module { index: 1, error: 1, message: Some("foo") }, - Module { index: 1, error: 1, message: None}, + Module { index: 1, error: 1, message: None }, ); } @@ -971,17 +993,13 @@ mod tests { #[should_panic(expected = "Signature verification has not been called")] fn batching_still_finishes_when_not_called_directly() { let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension( - sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), - ); + ext.register_extension(sp_core::traits::TaskExecutorExt::new( + sp_core::testing::TaskExecutor::new(), + )); ext.execute_with(|| { let _batching = SignatureBatching::start(); - sp_io::crypto::sr25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); + sp_io::crypto::sr25519_verify(&Default::default(), &Vec::new(), &Default::default()); }); } @@ -989,9 +1007,9 @@ mod tests { #[should_panic(expected = "Hey, I'm an error")] fn batching_does_not_panic_while_thread_is_already_panicking() { let mut ext = sp_state_machine::BasicExternalities::default(); - ext.register_extension( - sp_core::traits::TaskExecutorExt::new(sp_core::testing::TaskExecutor::new()), - ); + ext.register_extension(sp_core::traits::TaskExecutorExt::new( + sp_core::testing::TaskExecutor::new(), + )); ext.execute_with(|| { let _batching = SignatureBatching::start(); diff --git a/substrate/primitives/runtime/src/multiaddress.rs b/substrate/primitives/runtime/src/multiaddress.rs index e1a4c81a5f9ae232fa0727b3be581670489c23a3..8c866b98ed85e5808751c410777769c969587815 100644 --- a/substrate/primitives/runtime/src/multiaddress.rs +++ b/substrate/primitives/runtime/src/multiaddress.rs @@ -17,7 +17,7 @@ //! MultiAddress type is a wrapper for multiple downstream account formats. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; /// A multi-format address wrapper for on-chain accounts. 
@@ -46,8 +46,10 @@ where use sp_core::hexdisplay::HexDisplay; match self { Self::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), - Self::Address32(inner) => write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), - Self::Address20(inner) => write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), + Self::Address32(inner) => + write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), + Self::Address20(inner) => + write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), _ => write!(f, "{:?}", self), } } diff --git a/substrate/primitives/runtime/src/offchain/http.rs b/substrate/primitives/runtime/src/offchain/http.rs index a346460897d5830c5ed7b661b02bb54e559dee3e..7b305ebd9ccb8bef129d828435ef19b97f606c35 100644 --- a/substrate/primitives/runtime/src/offchain/http.rs +++ b/substrate/primitives/runtime/src/offchain/http.rs @@ -48,17 +48,15 @@ //! assert_eq!(body.error(), &None); //! ``` -use sp_std::str; -use sp_std::prelude::Vec; +use sp_core::{ + offchain::{ + HttpError, HttpRequestId as RequestId, HttpRequestStatus as RequestStatus, Timestamp, + }, + RuntimeDebug, +}; #[cfg(not(feature = "std"))] use sp_std::prelude::vec; -use sp_core::RuntimeDebug; -use sp_core::offchain::{ - Timestamp, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - HttpError, -}; +use sp_std::{prelude::Vec, str}; /// Request method (HTTP verb) #[derive(Clone, PartialEq, Eq, RuntimeDebug)] @@ -103,10 +101,7 @@ mod header { impl Header { /// Creates a new header given its name and value. pub fn new(name: &str, value: &str) -> Self { - Header { - name: name.as_bytes().to_vec(), - value: value.as_bytes().to_vec(), - } + Header { name: name.as_bytes().to_vec(), value: value.as_bytes().to_vec() } } /// Returns the name of this header. @@ -166,13 +161,7 @@ impl<'a, T> Request<'a, T> { pub fn post(url: &'a str, body: T) -> Self { let req: Request = Request::default(); - Request { - url, - body, - method: Method::Post, - headers: req.headers, - deadline: req.deadline, - } + Request { url, body, method: Method::Post, headers: req.headers, deadline: req.deadline } } } @@ -213,7 +202,7 @@ impl<'a, T: Default> Request<'a, T> { } } -impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { /// Send the request and return a handle. /// /// Err is returned in case the deadline is reached @@ -222,19 +211,13 @@ impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { let meta = &[]; // start an http request. - let id = sp_io::offchain::http_request_start( - self.method.as_ref(), - self.url, - meta, - ).map_err(|_| HttpError::IoError)?; + let id = sp_io::offchain::http_request_start(self.method.as_ref(), self.url, meta) + .map_err(|_| HttpError::IoError)?; // add custom headers for header in &self.headers { - sp_io::offchain::http_request_add_header( - id, - header.name(), - header.value(), - ).map_err(|_| HttpError::IoError)? + sp_io::offchain::http_request_add_header(id, header.name(), header.value()) + .map_err(|_| HttpError::IoError)? } // write body @@ -245,9 +228,7 @@ impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { // finalize the request sp_io::offchain::http_request_write_body(id, &[], self.deadline)?; - Ok(PendingRequest { - id, - }) + Ok(PendingRequest { id }) } } @@ -285,8 +266,13 @@ impl PendingRequest { /// Attempts to wait for the request to finish, /// but will return `Err` in case the deadline is reached.
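Condensed from the module example above and the test further down, driving this client end to end looks like the following sketch (must run inside off-chain worker externalities; the URL and header are the placeholder values from the docs, and the unwraps are for brevity only):

```rust
use sp_runtime::offchain::http::Request;

fn fetch_body() -> Vec<u8> {
    // `Request` defaults its body type; the annotation pins that default.
    let request: Request = Request::get("http://localhost:1234");
    let pending = request.add_header("X-Auth", "hunter2").send().unwrap();
    let response = pending.wait().unwrap();
    // `body()` is a lazy iterator over the response bytes.
    response.body().collect::<Vec<u8>>()
}
```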
- pub fn try_wait(self, deadline: impl Into>) -> Result { - Self::try_wait_all(vec![self], deadline).pop().expect("One request passed, one status received; qed") + pub fn try_wait( + self, + deadline: impl Into>, + ) -> Result { + Self::try_wait_all(vec![self], deadline) + .pop() + .expect("One request passed, one status received; qed") } /// Wait for all provided requests. @@ -305,7 +291,7 @@ impl PendingRequest { /// Requests that are complete will resolve to an `Ok`, others will return a `DeadlineReached` error. pub fn try_wait_all( requests: Vec, - deadline: impl Into> + deadline: impl Into>, ) -> Vec> { let ids = requests.iter().map(|r| r.id).collect::>(); let statuses = sp_io::offchain::http_response_wait(&ids, deadline.into()); @@ -336,19 +322,13 @@ pub struct Response { impl Response { fn new(id: RequestId, code: u16) -> Self { - Self { - id, - code, - headers: None, - } + Self { id, code, headers: None } } /// Retrieve the headers for this response. pub fn headers(&mut self) -> &Headers { if self.headers.is_none() { - self.headers = Some( - Headers { raw: sp_io::offchain::http_response_headers(self.id) }, - ); + self.headers = Some(Headers { raw: sp_io::offchain::http_response_headers(self.id) }); } self.headers.as_ref().expect("Headers were just set; qed") } @@ -363,7 +343,7 @@ impl Response { /// /// Note that reading the body may return `None` in the following cases: /// 1. Either the deadline you've set is reached (check via `#error`; -/// In such case you can resume the reader by setting a new deadline) +/// In such case you can resume the reader by setting a new deadline) /// 2. Or because of IOError. In such case the reader is not resumable and will keep /// returning `None`. /// 3. The body has been returned. The reader will keep returning `None`. @@ -423,32 +403,28 @@ impl Iterator for ResponseBody { fn next(&mut self) -> Option { if self.error.is_some() { - return None; + return None } if self.filled_up_to.is_none() { - let result = sp_io::offchain::http_response_read_body( - self.id, - &mut self.buffer, - self.deadline); + let result = + sp_io::offchain::http_response_read_body(self.id, &mut self.buffer, self.deadline); match result { Err(e) => { self.error = Some(e); - return None; - } + return None + }, - Ok(0) => { - return None; - } + Ok(0) => return None, Ok(size) => { self.position = 0; self.filled_up_to = Some(size as usize); - } + }, } } if Some(self.position) == self.filled_up_to { self.filled_up_to = None; - return self.next(); + return self.next() } let result = self.buffer[self.position]; @@ -508,7 +484,8 @@ impl<'a> HeadersIterator<'a> { /// /// Note that you have to call `next` prior to calling this. pub fn current(&self) -> Option<(&str, &str)> { - self.collection.get(self.index?) + self.collection + .get(self.index?)
.map(|val| (str::from_utf8(&val.0).unwrap_or(""), str::from_utf8(&val.1).unwrap_or(""))) } } @@ -516,11 +493,8 @@ impl<'a> HeadersIterator<'a> { #[cfg(test)] mod tests { use super::*; + use sp_core::offchain::{testing, OffchainWorkerExt}; use sp_io::TestExternalities; - use sp_core::offchain::{ - OffchainWorkerExt, - testing, - }; #[test] fn should_send_a_basic_request_and_get_response() { @@ -530,10 +504,7 @@ mod tests { t.execute_with(|| { let request: Request = Request::get("http://localhost:1234"); - let pending = request - .add_header("X-Auth", "hunter2") - .send() - .unwrap(); + let pending = request.add_header("X-Auth", "hunter2").send().unwrap(); // make sure it's sent correctly state.write().fulfill_pending_request( 0, diff --git a/substrate/primitives/runtime/src/offchain/storage.rs b/substrate/primitives/runtime/src/offchain/storage.rs index c6ed10c5be26f2864ef188dce68467fbfdbdd9ff..3bc5b10f161f770322d7f7f694e9e454c9647745 100644 --- a/substrate/primitives/runtime/src/offchain/storage.rs +++ b/substrate/primitives/runtime/src/offchain/storage.rs @@ -44,7 +44,7 @@ pub enum MutateStorageError { /// The function given to us to create the value to be stored failed. /// May be used to signal that having looked at the existing value, /// they don't want to mutate it. - ValueFunctionFailed(E) + ValueFunctionFailed(E), } impl<'a> StorageValueRef<'a> { @@ -64,9 +64,7 @@ impl<'a> StorageValueRef<'a> { /// if you happen to write a `get-check-set` pattern you should most likely /// be using `mutate` instead. pub fn set(&self, value: &impl codec::Encode) { - value.using_encoded(|val| { - sp_io::offchain::local_storage_set(self.kind, self.key, val) - }) + value.using_encoded(|val| sp_io::offchain::local_storage_set(self.kind, self.key, val)) } /// Remove the associated value from the storage. @@ -83,8 +81,7 @@ impl<'a> StorageValueRef<'a> { /// Returns an error if the value could not be decoded. pub fn get(&self) -> Result, StorageRetrievalError> { sp_io::offchain::local_storage_get(self.kind, self.key) - .map(|val| T::decode(&mut &*val) - .map_err(|_| StorageRetrievalError::Undecodable)) + .map(|val| T::decode(&mut &*val).map_err(|_| StorageRetrievalError::Undecodable)) .transpose() } @@ -98,26 +95,22 @@ impl<'a> StorageValueRef<'a> { /// 2. `Err(MutateStorageError::ConcurrentModification(T))` in case the value was calculated /// by the passed closure `mutate_val`, but it could not be stored. /// 3. `Err(MutateStorageError::ValueFunctionFailed(_))` in case `mutate_val` returns an error. 
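A usage sketch for the `mutate` outcomes enumerated above (off-chain worker context assumed; the key and counter semantics are illustrative):

```rust
use sp_runtime::offchain::storage::{MutateStorageError, StorageValueRef};

fn bump_counter() -> Option<u32> {
    let val = StorageValueRef::persistent(b"example::counter");
    match val.mutate::<u32, (), _>(|old| match old {
        Ok(Some(n)) => Ok(n + 1), // bump the stored counter
        Ok(None) => Ok(1),        // first write
        Err(_) => Err(()),        // undecodable value: refuse to overwrite
    }) {
        Ok(new) => Some(new),
        Err(MutateStorageError::ConcurrentModification(_)) => None, // lost the CAS race
        Err(MutateStorageError::ValueFunctionFailed(())) => None,
    }
}
```

The compare-and-set in the body is what distinguishes this from a plain `set`: a concurrent writer surfaces as `ConcurrentModification` rather than being silently clobbered.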
- pub fn mutate(&self, mutate_val: F) -> Result> where + pub fn mutate(&self, mutate_val: F) -> Result> + where T: codec::Codec, - F: FnOnce(Result, StorageRetrievalError>) -> Result + F: FnOnce(Result, StorageRetrievalError>) -> Result, { let value = sp_io::offchain::local_storage_get(self.kind, self.key); - let decoded = value.as_deref() - .map(|mut bytes| { - T::decode(&mut bytes) - .map_err(|_| StorageRetrievalError::Undecodable) - }).transpose(); + let decoded = value + .as_deref() + .map(|mut bytes| T::decode(&mut bytes).map_err(|_| StorageRetrievalError::Undecodable)) + .transpose(); - let val = mutate_val(decoded).map_err(|err| MutateStorageError::ValueFunctionFailed(err))?; + let val = + mutate_val(decoded).map_err(|err| MutateStorageError::ValueFunctionFailed(err))?; let set = val.using_encoded(|new_val| { - sp_io::offchain::local_storage_compare_and_set( - self.kind, - self.key, - value, - new_val, - ) + sp_io::offchain::local_storage_compare_and_set(self.kind, self.key, value, new_val) }); if set { Ok(val) @@ -130,11 +123,8 @@ impl<'a> StorageValueRef<'a> { #[cfg(test)] mod tests { use super::*; + use sp_core::offchain::{testing, OffchainDbExt}; use sp_io::TestExternalities; - use sp_core::offchain::{ - OffchainDbExt, - testing, - }; #[test] fn should_set_and_get() { @@ -151,10 +141,7 @@ mod tests { assert_eq!(val.get::(), Ok(Some(15_u32))); assert_eq!(val.get::>(), Err(StorageRetrievalError::Undecodable)); - assert_eq!( - state.read().persistent_storage.get(b"testval"), - Some(vec![15_u8, 0, 0, 0]) - ); + assert_eq!(state.read().persistent_storage.get(b"testval"), Some(vec![15_u8, 0, 0, 0])); }) } @@ -174,10 +161,7 @@ mod tests { }); assert_eq!(result, Ok(16_u32)); assert_eq!(val.get::(), Ok(Some(16_u32))); - assert_eq!( - state.read().persistent_storage.get(b"testval"), - Some(vec![16_u8, 0, 0, 0]) - ); + assert_eq!(state.read().persistent_storage.get(b"testval"), Some(vec![16_u8, 0, 0, 0])); // mutate again, but this time early-exit. let res = val.mutate::(|val| { diff --git a/substrate/primitives/runtime/src/offchain/storage_lock.rs b/substrate/primitives/runtime/src/offchain/storage_lock.rs index 7ea52775c5e0582824931b49c0673e9958e4e45e..b4833bf345fc05125a6f46fb07d0d413bb308ff3 100644 --- a/substrate/primitives/runtime/src/offchain/storage_lock.rs +++ b/substrate/primitives/runtime/src/offchain/storage_lock.rs @@ -38,8 +38,8 @@ //! # use codec::{Decode, Encode, Codec}; //! // in your off-chain worker code //! use sp_runtime::offchain::{ -//! storage::StorageValueRef, -//! storage_lock::{StorageLock, Time}, +//! storage::StorageValueRef, +//! storage_lock::{StorageLock, Time}, //! }; //! //! fn append_to_in_storage_vec<'a, T>(key: &'a [u8], _: T) where T: Codec { @@ -61,8 +61,10 @@ //! } //! 
``` -use crate::offchain::storage::{StorageRetrievalError, MutateStorageError, StorageValueRef}; -use crate::traits::BlockNumberProvider; +use crate::{ + offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, + traits::BlockNumberProvider, +}; use codec::{Codec, Decode, Encode}; use sp_core::offchain::{Duration, Timestamp}; use sp_io::offchain; @@ -115,9 +117,7 @@ pub struct Time { impl Default for Time { fn default() -> Self { - Self { - expiration_duration: STORAGE_LOCK_DEFAULT_EXPIRY_DURATION, - } + Self { expiration_duration: STORAGE_LOCK_DEFAULT_EXPIRY_DURATION } } } @@ -157,10 +157,7 @@ pub struct BlockAndTimeDeadline { impl Clone for BlockAndTimeDeadline { fn clone(&self) -> Self { - Self { - block_number: self.block_number.clone(), - timestamp: self.timestamp, - } + Self { block_number: self.block_number.clone(), timestamp: self.timestamp } } } @@ -175,7 +172,8 @@ impl Default for BlockAndTimeDeadline { } impl fmt::Debug for BlockAndTimeDeadline - where ::BlockNumber: fmt::Debug +where + ::BlockNumber: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("BlockAndTimeDeadline") @@ -225,8 +223,8 @@ impl Lockable for BlockAndTime { type Deadline = BlockAndTimeDeadline; fn deadline(&self) -> Self::Deadline { - let block_number = ::current_block_number() - + self.expiration_block_number_offset.into(); + let block_number = ::current_block_number() + + self.expiration_block_number_offset.into(); BlockAndTimeDeadline { timestamp: offchain::timestamp().add(self.expiration_duration), block_number, @@ -234,8 +232,8 @@ impl Lockable for BlockAndTime { } fn has_expired(deadline: &Self::Deadline) -> bool { - offchain::timestamp() > deadline.timestamp - && ::current_block_number() > deadline.block_number + offchain::timestamp() > deadline.timestamp && + ::current_block_number() > deadline.block_number } fn snooze(deadline: &Self::Deadline) { @@ -271,10 +269,7 @@ impl<'a, L: Lockable + Default> StorageLock<'a, L> { impl<'a, L: Lockable> StorageLock<'a, L> { /// Create a new storage lock with an explicit instance of a lockable `L`. 
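A minimal guard-style usage sketch for the time-based lock above (off-chain worker context assumed; the key is illustrative):

```rust
use sp_runtime::offchain::storage_lock::{StorageLock, Time};

fn locked_update() {
    let mut lock = StorageLock::<Time>::new(b"example::lock");
    {
        let _guard = lock.lock(); // snoozes until the lock is free
        // critical section: read-modify-write off-chain storage here
    } // guard dropped: lock released for other workers
}
```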
pub fn with_lockable(key: &'a [u8], lockable: L) -> Self { - Self { - value_ref: StorageValueRef::<'a>::persistent(key), - lockable, - } + Self { value_ref: StorageValueRef::<'a>::persistent(key), lockable } } /// Extend active lock's deadline @@ -398,9 +393,7 @@ impl<'a> StorageLock<'a, Time> { pub fn with_deadline(key: &'a [u8], expiration_duration: Duration) -> Self { Self { value_ref: StorageValueRef::<'a>::persistent(key), - lockable: Time { - expiration_duration, - }, + lockable: Time { expiration_duration }, } } } @@ -443,7 +436,7 @@ where #[cfg(test)] mod tests { use super::*; - use sp_core::offchain::{testing, OffchainWorkerExt, OffchainDbExt}; + use sp_core::offchain::{testing, OffchainDbExt, OffchainWorkerExt}; use sp_io::TestExternalities; const VAL_1: u32 = 0u32; diff --git a/substrate/primitives/runtime/src/runtime_logger.rs b/substrate/primitives/runtime/src/runtime_logger.rs index f74704390174d8264c8fb3cf22149efed219d5ea..ff0e531ed814f4fa07842e47a075218704b35c7d 100644 --- a/substrate/primitives/runtime/src/runtime_logger.rs +++ b/substrate/primitives/runtime/src/runtime_logger.rs @@ -57,11 +57,7 @@ impl log::Log for RuntimeLogger { let mut w = sp_std::Writer::default(); let _ = ::core::write!(&mut w, "{}", record.args()); - sp_io::logging::log( - record.level().into(), - record.target(), - w.inner(), - ); + sp_io::logging::log(record.level().into(), record.target(), w.inner()); } fn flush(&self) {} @@ -69,12 +65,12 @@ impl log::Log for RuntimeLogger { #[cfg(test)] mod tests { + use sp_api::{BlockId, ProvideRuntimeApi}; + use std::{env, str::FromStr}; use substrate_test_runtime_client::{ - ExecutionStrategy, TestClientBuilderExt, DefaultTestClientBuilderExt, - TestClientBuilder, runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, ExecutionStrategy, TestClientBuilder, + TestClientBuilderExt, }; - use sp_api::{ProvideRuntimeApi, BlockId}; - use std::{env, str::FromStr}; #[test] fn ensure_runtime_logger_respects_host_max_log_level() { @@ -83,7 +79,8 @@ mod tests { log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap()); let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(0); runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); diff --git a/substrate/primitives/runtime/src/runtime_string.rs b/substrate/primitives/runtime/src/runtime_string.rs index e315de430c12dbb96328d1efe4d515543d510fdb..273a22e98f33d54f19036fd887857881cf681e69 100644 --- a/substrate/primitives/runtime/src/runtime_string.rs +++ b/substrate/primitives/runtime/src/runtime_string.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_core::RuntimeDebug; use sp_std::vec::Vec; @@ -47,7 +47,6 @@ macro_rules! format_runtime_string { }}; } - impl From<&'static str> for RuntimeString { fn from(data: &'static str) -> Self { Self::Borrowed(data) @@ -130,5 +129,7 @@ impl<'de> serde::Deserialize<'de> for RuntimeString { /// Create a const [`RuntimeString`]. #[macro_export] macro_rules! 
create_runtime_str {
-	( $y:expr ) => {{ $crate::RuntimeString::Borrowed($y) }}
+	( $y:expr ) => {{
+		$crate::RuntimeString::Borrowed($y)
+	}};
 }
diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs
index f473dc7028f4e947795a60e073e6005e040d4f11..60dc54e09534f18d42777fef07a51db0dd4f578c 100644
--- a/substrate/primitives/runtime/src/testing.rs
+++ b/substrate/primitives/runtime/src/testing.rs
@@ -17,18 +17,27 @@
 //! Testing utilities.
 
-use serde::{Serialize, Serializer, Deserialize, de::Error as DeError, Deserializer};
-use std::{fmt::{self, Debug}, ops::Deref, cell::RefCell};
-use crate::codec::{Codec, Encode, Decode};
-use crate::traits::{
-	self, Checkable, Applyable, BlakeTwo256, OpaqueKeys,
-	SignedExtension, Dispatchable, DispatchInfoOf, PostDispatchInfoOf,
+use crate::{
+	codec::{Codec, Decode, Encode},
+	generic,
+	traits::{
+		self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys,
+		PostDispatchInfoOf, SignedExtension, ValidateUnsigned,
+	},
+	transaction_validity::{TransactionSource, TransactionValidity, TransactionValidityError},
+	ApplyExtrinsicResultWithInfo, CryptoTypeId, KeyTypeId,
+};
+use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer};
+use sp_core::{
+	crypto::{key_types, CryptoType, Dummy, Public},
+	U256,
+};
+pub use sp_core::{sr25519, H256};
+use std::{
+	cell::RefCell,
+	fmt::{self, Debug},
+	ops::Deref,
 };
-use crate::traits::ValidateUnsigned;
-use crate::{generic, KeyTypeId, CryptoTypeId, ApplyExtrinsicResultWithInfo};
-pub use sp_core::{H256, sr25519};
-use sp_core::{crypto::{CryptoType, Dummy, key_types, Public}, U256};
-use crate::transaction_validity::{TransactionValidity, TransactionValidityError, TransactionSource};
 
 /// A dummy type which can be used instead of regular cryptographic primitives.
 ///
@@ -36,7 +45,20 @@ use crate::transaction_validity::{TransactionValidity, TransactionValidityError,
 /// 2. Can be converted to any `Public` key.
 /// 3. Implements `RuntimeAppPublic` so it can be used instead of regular application-specific
 /// crypto.
-#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, Debug, Hash, Serialize, Deserialize, PartialOrd, Ord)]
+#[derive(
+	Default,
+	PartialEq,
+	Eq,
+	Clone,
+	Encode,
+	Decode,
+	Debug,
+	Hash,
+	Serialize,
+	Deserialize,
+	PartialOrd,
+	Ord,
+)]
 pub struct UintAuthorityId(pub u64);
 
 impl From<u64> for UintAuthorityId {
@@ -68,7 +90,10 @@ impl AsRef<[u8]> for UintAuthorityId {
 		// Unsafe, i know, but it's test code and it's just there because it's really convenient to
 		// keep `UintAuthorityId` as a u64 under the hood.
 		unsafe {
-			std::slice::from_raw_parts(&self.0 as *const u64 as *const _, std::mem::size_of::<u64>())
+			std::slice::from_raw_parts(
+				&self.0 as *const u64 as *const _,
+				std::mem::size_of::<u64>(),
+			)
 		}
 	}
 }
@@ -80,7 +105,7 @@ thread_local! {
 impl UintAuthorityId {
 	/// Set the list of keys returned by the runtime call for all keys of that type.
-	pub fn set_all_keys<T: Into<UintAuthorityId>>(keys: impl IntoIterator<Item=T>) {
+	pub fn set_all_keys<T: Into<UintAuthorityId>>(keys: impl IntoIterator<Item = T>) {
 		ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect())
 	}
 }
@@ -180,7 +205,8 @@ impl Header {
 pub struct ExtrinsicWrapper<Xt>(Xt);
 
 impl<Xt> traits::Extrinsic for ExtrinsicWrapper<Xt>
-where Xt: parity_util_mem::MallocSizeOf
+where
+	Xt: parity_util_mem::MallocSizeOf,
 {
 	type Call = ();
 	type SignaturePayload = ();
@@ -191,7 +217,10 @@ where Xt: parity_util_mem::MallocSizeOf
 }
 
 impl<Xt: Encode> serde::Serialize for ExtrinsicWrapper<Xt> {
-	fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: ::serde::Serializer {
+	fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
+	where
+		S: ::serde::Serializer,
+	{
 		self.using_encoded(|bytes| seq.serialize_bytes(bytes))
 	}
 }
@@ -219,8 +248,9 @@ pub struct Block<Xt> {
 	pub extrinsics: Vec<Xt>,
 }
 
-impl<Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic> traits::Block
-	for Block<Xt>
+impl<
+		Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic,
+	> traits::Block for Block<Xt>
 {
 	type Extrinsic = Xt;
 	type Header = Header;
@@ -243,7 +273,10 @@
-impl<'a, Xt> Deserialize<'a> for Block<Xt> where Block<Xt>: Decode {
+impl<'a, Xt> Deserialize<'a> for Block<Xt>
+where
+	Block<Xt>: Decode,
+{
 	fn deserialize<D: Deserializer<'a>>(de: D) -> Result<Self, D::Error> {
 		let r = <Vec<u8>>::deserialize(de)?;
 		Decode::decode(&mut &r[..])
@@ -273,8 +306,14 @@ impl<Call, Extra> TestXt<Call, Extra> {
 // Non-opaque extrinsics always 0.
 parity_util_mem::malloc_size_of_is_0!(any: TestXt<Call, Extra>);
 
-impl<Call, Extra> Serialize for TestXt<Call, Extra> where TestXt<Call, Extra>: Encode {
-	fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error> where S: Serializer {
+impl<Call, Extra> Serialize for TestXt<Call, Extra>
+where
+	TestXt<Call, Extra>: Encode,
+{
+	fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
+	where
+		S: Serializer,
+	{
 		self.using_encoded(|bytes| seq.serialize_bytes(bytes))
 	}
 }
@@ -287,7 +326,9 @@ impl<Call, Extra> Debug for TestXt<Call, Extra> {
 impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for TestXt<Call, Extra> {
 	type Checked = Self;
-	fn check(self, _: &Context) -> Result<Self::Checked, TransactionValidityError> { Ok(self) }
+	fn check(self, _: &Context) -> Result<Self::Checked, TransactionValidityError> {
+		Ok(self)
+	}
 }
 
 impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for TestXt<Call, Extra> {
@@ -303,23 +344,26 @@ impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for TestXt<Call, Extra>
 	}
 }
 
-impl<Call, Extra> traits::ExtrinsicMetadata for TestXt<Call, Extra> where
+impl<Call, Extra> traits::ExtrinsicMetadata for TestXt<Call, Extra>
+where
 	Call: Codec + Sync + Send,
-	Extra: SignedExtension<AccountId=u64, Call=()>,
+	Extra: SignedExtension<AccountId = u64, Call = ()>,
 {
 	type SignedExtensions = Extra;
 	const VERSION: u8 = 0u8;
 }
 
-impl<Origin, Call, Extra> Applyable for TestXt<Call, Extra> where
-	Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin=Origin>,
-	Extra: SignedExtension<AccountId=u64, Call=Call>,
+impl<Origin, Call, Extra> Applyable for TestXt<Call, Extra>
+where
+	Call:
+		'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>,
+	Extra: SignedExtension<AccountId = u64, Call = Call>,
 	Origin: From<Option<u64>>,
 {
 	type Call = Call;
 
 	/// Checks to see if this is a valid *transaction*. It returns information on it if so.
-	fn validate<U: ValidateUnsigned<Call = Self::Call>>(
+	fn validate<U: ValidateUnsigned<Call = Self::Call>>(
 		&self,
 		source: TransactionSource,
 		info: &DispatchInfoOf<Self::Call>,
@@ -336,7 +380,7 @@ impl<Origin, Call, Extra> Applyable for TestXt<Call, Extra> where
 	/// Executes all necessary logic needed prior to dispatch and deconstructs into function call,
 	/// index and sender.
-	fn apply<U: ValidateUnsigned<Call = Self::Call>>(
+	fn apply<U: ValidateUnsigned<Call = Self::Call>>(
 		self,
 		info: &DispatchInfoOf<Self::Call>,
 		len: usize,
diff --git a/substrate/primitives/runtime/src/traits.rs b/substrate/primitives/runtime/src/traits.rs
index 4396c97598231a75627e440291ea08f7060c24c7..3baf7c6655b9029b0e0d832fb3c545b51920f111 100644
--- a/substrate/primitives/runtime/src/traits.rs
+++ b/substrate/primitives/runtime/src/traits.rs
@@ -17,29 +17,36 @@
 //! Primitives for the runtime modules.
-use sp_std::prelude::*; -use sp_std::{self, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; +use crate::{ + codec::{Codec, Decode, Encode, MaxEncodedLen}, + generic::{Digest, DigestItem}, + transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, + }, + DispatchResult, +}; +use impl_trait_for_tuples::impl_for_tuples; +#[cfg(feature = "std")] +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use sp_application_crypto::AppKey; +pub use sp_arithmetic::traits::{ + AtLeast32Bit, AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, + CheckedShr, CheckedSub, IntegerSquareRoot, One, SaturatedConversion, Saturating, + UniqueSaturatedFrom, UniqueSaturatedInto, Zero, +}; +use sp_core::{self, Hasher, RuntimeDebug, TypeId}; +use sp_std::{ + self, + convert::{TryFrom, TryInto}, + fmt::Debug, + marker::PhantomData, + prelude::*, +}; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, Hasher, TypeId, RuntimeDebug}; -use crate::codec::{Codec, Encode, Decode, MaxEncodedLen}; -use crate::transaction_validity::{ - ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, - UnknownTransaction, -}; -use crate::generic::{Digest, DigestItem}; -pub use sp_arithmetic::traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, UniqueSaturatedInto, UniqueSaturatedFrom, Saturating, - SaturatedConversion, Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr, IntegerSquareRoot -}; -use sp_application_crypto::AppKey; -use impl_trait_for_tuples::impl_for_tuples; -use crate::DispatchResult; /// A lazy value. pub trait Lazy { @@ -50,7 +57,9 @@ pub trait Lazy { } impl<'a> Lazy<[u8]> for &'a [u8] { - fn get(&mut self) -> &[u8] { &**self } + fn get(&mut self) -> &[u8] { + &**self + } } /// Some type that is able to be collapsed into an account ID. It is not possible to recreate the @@ -64,17 +73,23 @@ pub trait IdentifyAccount { impl IdentifyAccount for sp_core::ed25519::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::sr25519::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::ecdsa::Public { type AccountId = Self; - fn into_account(self) -> Self { self } + fn into_account(self) -> Self { + self + } } /// Means of signature verification. @@ -84,7 +99,11 @@ pub trait Verify { /// Verify a signature. /// /// Return `true` if signature is valid for the value. 
- fn verify>(&self, msg: L, signer: &::AccountId) -> bool; + fn verify>( + &self, + msg: L, + signer: &::AccountId, + ) -> bool; } impl Verify for sp_core::ed25519::Signature { @@ -125,19 +144,27 @@ pub trait AppVerify { } impl< - S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + From, - T: sp_application_crypto::Wraps + sp_application_crypto::AppKey + sp_application_crypto::AppSignature + - AsRef + AsMut + From, -> AppVerify for T where + S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + + From, + T: sp_application_crypto::Wraps + + sp_application_crypto::AppKey + + sp_application_crypto::AppSignature + + AsRef + + AsMut + + From, + > AppVerify for T +where ::Signer: IdentifyAccount::Signer>, - <::Public as sp_application_crypto::AppPublic>::Generic: - IdentifyAccount::Public as sp_application_crypto::AppPublic>::Generic>, + <::Public as sp_application_crypto::AppPublic>::Generic: IdentifyAccount< + AccountId = <::Public as sp_application_crypto::AppPublic>::Generic, + >, { type AccountId = ::Public; fn verify>(&self, msg: L, signer: &::Public) -> bool { use sp_application_crypto::IsWrappedBy; let inner: &S = self.as_ref(); - let inner_pubkey = <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); + let inner_pubkey = + <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); Verify::verify(inner, msg, inner_pubkey) } } @@ -198,14 +225,20 @@ pub struct IdentityLookup(PhantomData); impl StaticLookup for IdentityLookup { type Source = T; type Target = T; - fn lookup(x: T) -> Result { Ok(x) } - fn unlookup(x: T) -> T { x } + fn lookup(x: T) -> Result { + Ok(x) + } + fn unlookup(x: T) -> T { + x + } } impl Lookup for IdentityLookup { type Source = T; type Target = T; - fn lookup(&self, x: T) -> Result { Ok(x) } + fn lookup(&self, x: T) -> Result { + Ok(x) + } } /// A lookup implementation returning the `AccountId` from a `MultiAddress`. @@ -253,19 +286,25 @@ pub trait Convert { } impl Convert for () { - fn convert(_: A) -> B { Default::default() } + fn convert(_: A) -> B { + Default::default() + } } /// A structure that performs identity conversion. pub struct Identity; impl Convert for Identity { - fn convert(a: T) -> T { a } + fn convert(a: T) -> T { + a + } } /// A structure that performs standard conversion using the standard Rust conversion traits. pub struct ConvertInto; impl> Convert for ConvertInto { - fn convert(a: A) -> B { a.into() } + fn convert(a: A) -> B { + a.into() + } } /// Convenience type to work around the highly unergonomic syntax needed @@ -277,7 +316,10 @@ pub trait CheckedConversion { /// This just uses `TryFrom` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn checked_from(t: T) -> Option where Self: TryFrom { + fn checked_from(t: T) -> Option + where + Self: TryFrom, + { >::try_from(t).ok() } /// Consume self to return `Some` equivalent value of `Option`. @@ -285,7 +327,10 @@ pub trait CheckedConversion { /// This just uses `TryInto` internally but with this /// variant you can provide the destination type using turbofish syntax /// in case Rust happens not to assume the correct type. - fn checked_into(self) -> Option where Self: TryInto { + fn checked_into(self) -> Option + where + Self: TryInto, + { >::try_into(self).ok() } } @@ -310,11 +355,17 @@ macro_rules! 
impl_scale { ($self:ty, $other:ty) => { impl Scale<$other> for $self { type Output = Self; - fn mul(self, other: $other) -> Self::Output { self * (other as Self) } - fn div(self, other: $other) -> Self::Output { self / (other as Self) } - fn rem(self, other: $other) -> Self::Output { self % (other as Self) } + fn mul(self, other: $other) -> Self::Output { + self * (other as Self) + } + fn div(self, other: $other) -> Self::Output { + self / (other as Self) + } + fn rem(self, other: $other) -> Self::Output { + self % (other as Self) + } } - } + }; } impl_scale!(u128, u128); impl_scale!(u128, u64); @@ -343,31 +394,57 @@ pub trait Clear { } impl Clear for T { - fn is_clear(&self) -> bool { *self == Self::clear() } - fn clear() -> Self { Default::default() } + fn is_clear(&self) -> bool { + *self == Self::clear() + } + fn clear() -> Self { + Default::default() + } } /// A meta trait for all bit ops. pub trait SimpleBitOps: - Sized + Clear + - sp_std::ops::BitOr + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -{} -impl + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -> SimpleBitOps for T {} + Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd +{ +} +impl< + T: Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd, + > SimpleBitOps for T +{ +} /// Abstraction around hashing // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. -pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq + Hasher::Output> { +pub trait Hash: + 'static + + MaybeSerializeDeserialize + + Debug + + Clone + + Eq + + PartialEq + + Hasher::Output> +{ /// The hash type produced. - type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode + MaxEncodedLen; + type Output: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + Encode + + Decode + + MaxEncodedLen; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { @@ -469,7 +546,10 @@ impl CheckEqual for sp_core::H256 { } } -impl CheckEqual for super::generic::DigestItem where H: Encode { +impl CheckEqual for super::generic::DigestItem +where + H: Encode, +{ #[cfg(feature = "std")] fn check_equal(&self, other: &Self) { if self != other { @@ -523,16 +603,33 @@ pub trait IsMember { /// /// You can also create a `new` one from those fields. pub trait Header: - Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + - MaybeMallocSizeOf + 'static + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { /// Header number. 
- type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Copy + - MaybeDisplay + AtLeast32BitUnsigned + Codec + sp_std::str::FromStr + MaybeMallocSizeOf; + type Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32BitUnsigned + + Codec + + sp_std::str::FromStr + + MaybeMallocSizeOf; /// Header hash type - type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> - + AsMut<[u8]> + MaybeMallocSizeOf; + type Hash: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf; /// Hashing algorithm type Hashing: Hash; @@ -580,14 +677,26 @@ pub trait Header: /// `Extrinsic` pieces of information as well as a `Header`. /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. -pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { +pub trait Block: + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static +{ /// Type for extrinsics. type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize + MaybeMallocSizeOf; /// Header type. - type Header: Header + MaybeMallocSizeOf; + type Header: Header + MaybeMallocSizeOf; /// Block hash type. - type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]> + type Hash: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]> + MaybeMallocSizeOf; /// Returns a reference to the header. @@ -607,7 +716,6 @@ pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + May fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec; } - /// Something that acts like an `Extrinsic`. pub trait Extrinsic: Sized + MaybeMallocSizeOf { /// The function call. @@ -622,7 +730,9 @@ pub trait Extrinsic: Sized + MaybeMallocSizeOf { /// Is this `Extrinsic` signed? /// If no information are available about signed/unsigned, `None` should be returned. - fn is_signed(&self) -> Option { None } + fn is_signed(&self) -> Option { + None + } /// Create new instance of the extrinsic. /// @@ -630,7 +740,9 @@ pub trait Extrinsic: Sized + MaybeMallocSizeOf { /// 1. Inherents (no signature; created by validators during block production) /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of calls) /// 3. Signed Transactions (with signature; a regular transactions with known origin) - fn new(_call: Self::Call, _signed_data: Option) -> Option { None } + fn new(_call: Self::Call, _signed_data: Option) -> Option { + None + } } /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. @@ -878,9 +990,13 @@ impl SignedExtension for Tuple { Ok(valid) } - fn pre_dispatch(self, who: &Self::AccountId, call: &Self::Call, info: &DispatchInfoOf, len: usize) - -> Result - { + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? 
),* ) )) } @@ -928,7 +1044,9 @@ impl SignedExtension for () { type Call = (); type Pre = (); const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -942,7 +1060,7 @@ pub trait Applyable: Sized + Send + Sync { type Call: Dispatchable; /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( + fn validate>( &self, source: TransactionSource, info: &DispatchInfoOf, @@ -951,7 +1069,7 @@ pub trait Applyable: Sized + Send + Sync { /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. - fn apply>( + fn apply>( self, info: &DispatchInfoOf, len: usize, @@ -1020,7 +1138,9 @@ pub trait OpaqueKeys: Clone { T::decode(&mut self.get_raw(i)).ok() } /// Verify a proof of ownership for the keys. - fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { true } + fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { + true + } } /// Input that adds infinite number of zero after wrapped input. @@ -1056,7 +1176,7 @@ impl<'a, T: codec::Input> codec::Input for AppendZerosInput<'a, T> { into[i] = b; i += 1; } else { - break; + break } } i @@ -1099,7 +1219,9 @@ impl<'a> codec::Input for TrailingZeroInput<'a> { /// This type can be converted into and possibly from an AccountId (which itself is generic). pub trait AccountIdConversion: Sized { /// Convert into an account ID. This is infallible. - fn into_account(&self) -> AccountId { self.into_sub_account(&()) } + fn into_account(&self) -> AccountId { + self.into_sub_account(&()) + } /// Try to convert an account ID into this type. Might not succeed. fn try_from_account(a: &AccountId) -> Option { @@ -1125,14 +1247,16 @@ pub trait AccountIdConversion: Sized { /// fill AccountId. impl AccountIdConversion for Id { fn into_sub_account(&self, sub: S) -> T { - (Id::TYPE_ID, self, sub).using_encoded(|b| - T::decode(&mut TrailingZeroInput(b)) - ).unwrap_or_default() + (Id::TYPE_ID, self, sub) + .using_encoded(|b| T::decode(&mut TrailingZeroInput(b))) + .unwrap_or_default() } fn try_from_sub_account(x: &T) -> Option<(Self, S)> { x.using_encoded(|d| { - if &d[0..4] != Id::TYPE_ID { return None } + if &d[0..4] != Id::TYPE_ID { + return None + } let mut cursor = &d[4..]; let result = Decode::decode(&mut cursor).ok()?; if cursor.iter().all(|x| *x == 0) { @@ -1466,19 +1590,19 @@ pub trait BlockNumberProvider { #[cfg(test)] mod tests { use super::*; - use crate::codec::{Encode, Decode, Input}; + use crate::codec::{Decode, Encode, Input}; use sp_core::{crypto::Pair, ecdsa}; mod t { - use sp_core::crypto::KeyTypeId; use sp_application_crypto::{app_crypto, sr25519}; + use sp_core::crypto::KeyTypeId; app_crypto!(sr25519, KeyTypeId(*b"test")); } #[test] fn app_verify_works() { - use t::*; use super::AppVerify; + use t::*; let s = Signature::default(); let _ = s.verify(&[0u8; 100][..], &Public::default()); diff --git a/substrate/primitives/runtime/src/transaction_validity.rs b/substrate/primitives/runtime/src/transaction_validity.rs index 1768c27d6f5a6df6a5bf8741809e9bee63b4ff40..939452384f75e0b019c7f403d378f4f75b59d444 100644 --- a/substrate/primitives/runtime/src/transaction_validity.rs +++ b/substrate/primitives/runtime/src/transaction_validity.rs @@ -17,9 +17,11 @@ //! 
Transaction validity interface. +use crate::{ + codec::{Decode, Encode}, + RuntimeDebug, +}; use sp_std::prelude::*; -use crate::codec::{Encode, Decode}; -use crate::RuntimeDebug; /// Priority for a transaction. Additive. Higher is better. pub type TransactionPriority = u64; @@ -98,8 +100,7 @@ impl From for &'static str { InvalidTransaction::Stale => "Transaction is outdated", InvalidTransaction::BadProof => "Transaction has a bad signature", InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", - InvalidTransaction::ExhaustsResources => - "Transaction would exhaust the block limits", + InvalidTransaction::ExhaustsResources => "Transaction would exhaust the block limits", InvalidTransaction::Payment => "Inability to pay some fees (e.g. account balance too low)", InvalidTransaction::BadMandatory => @@ -220,7 +221,9 @@ impl From for TransactionValidity { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). -#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf)] +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf, +)] pub enum TransactionSource { /// Transaction is already included in block. /// @@ -295,10 +298,7 @@ impl ValidTransaction { /// To avoid conflicts between different parts in runtime it's recommended to build `requires` /// and `provides` tags with a unique prefix. pub fn with_tag_prefix(prefix: &'static str) -> ValidTransactionBuilder { - ValidTransactionBuilder { - prefix: Some(prefix), - validity: Default::default(), - } + ValidTransactionBuilder { prefix: Some(prefix), validity: Default::default() } } /// Combine two instances into one, as a best effort. 
This will take the superset of each of the @@ -307,8 +307,14 @@ impl ValidTransaction { pub fn combine_with(mut self, mut other: ValidTransaction) -> Self { Self { priority: self.priority.saturating_add(other.priority), - requires: { self.requires.append(&mut other.requires); self.requires }, - provides: { self.provides.append(&mut other.provides); self.provides }, + requires: { + self.requires.append(&mut other.requires); + self.requires + }, + provides: { + self.provides.append(&mut other.provides); + self.provides + }, longevity: self.longevity.min(other.longevity), propagate: self.propagate && other.propagate, } @@ -412,7 +418,6 @@ impl From for ValidTransaction { } } - #[cfg(test)] mod tests { use super::*; @@ -430,7 +435,10 @@ mod tests { let encoded = v.encode(); assert_eq!( encoded, - vec![0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, 0, 0] + vec![ + 0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, + 0, 0 + ] ); // decode back @@ -450,12 +458,15 @@ mod tests { .priority(3) .priority(6) .into(); - assert_eq!(a, ValidTransaction { - propagate: false, - longevity: 5, - priority: 6, - requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], - provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], - }); + assert_eq!( + a, + ValidTransaction { + propagate: false, + longevity: 5, + priority: 6, + requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], + provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], + } + ); } } diff --git a/substrate/primitives/sandbox/src/lib.rs b/substrate/primitives/sandbox/src/lib.rs index 22e68439958d9dfb94e0c38a1cb0926f2c33560e..a433d57c3b51530329f69a7ea3c24aa84e617e6a 100755 --- a/substrate/primitives/sandbox/src/lib.rs +++ b/substrate/primitives/sandbox/src/lib.rs @@ -41,7 +41,7 @@ use sp_std::prelude::*; pub use sp_core::sandbox::HostError; -pub use sp_wasm_interface::{Value, ReturnValue}; +pub use sp_wasm_interface::{ReturnValue, Value}; mod imp { #[cfg(feature = "std")] @@ -100,9 +100,7 @@ impl Memory { /// /// Allocated memory is always zeroed. pub fn new(initial: u32, maximum: Option) -> Result { - Ok(Memory { - inner: imp::Memory::new(initial, maximum)?, - }) + Ok(Memory { inner: imp::Memory::new(initial, maximum)? }) } /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. @@ -131,9 +129,7 @@ pub struct EnvironmentDefinitionBuilder { impl EnvironmentDefinitionBuilder { /// Construct a new `EnvironmentDefinitionBuilder`. pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder::new(), - } + EnvironmentDefinitionBuilder { inner: imp::EnvironmentDefinitionBuilder::new() } } /// Register a host function in this environment definition. @@ -176,12 +172,12 @@ impl Instance { /// be returned. /// /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new(code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T) - -> Result, Error> - { - Ok(Instance { - inner: imp::Instance::new(code, &env_def_builder.inner, state)?, - }) + pub fn new( + code: &[u8], + env_def_builder: &EnvironmentDefinitionBuilder, + state: &mut T, + ) -> Result, Error> { + Ok(Instance { inner: imp::Instance::new(code, &env_def_builder.inner, state)? }) } /// Invoke an exported function with the given name. 
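Aside on the `ValidTransaction::combine_with` hunk above: the reformat is re-indentation only, and the merge rules it wraps are: priorities add (saturating), the `requires` and `provides` tag lists are concatenated, and the pessimistic side wins for `longevity` (minimum) and `propagate` (logical AND). A self-contained sketch of those rules with a stand-in type so it compiles on its own (std only; the field names mirror the diff, but this is an illustration, not the real `sp_runtime` type):

// Stand-in for sp_runtime's ValidTransaction; fields copied from the hunk above.
#[derive(Debug, PartialEq)]
struct Validity {
    priority: u64,
    requires: Vec<Vec<u8>>,
    provides: Vec<Vec<u8>>,
    longevity: u64,
    propagate: bool,
}

impl Validity {
    // Mirrors the merge logic in the `combine_with` hunk above.
    fn combine_with(mut self, mut other: Validity) -> Self {
        Self {
            // Priorities are additive; saturating_add avoids overflow.
            priority: self.priority.saturating_add(other.priority),
            // The combined transaction requires and provides both tag sets.
            requires: {
                self.requires.append(&mut other.requires);
                self.requires
            },
            provides: {
                self.provides.append(&mut other.provides);
                self.provides
            },
            // Valid only as long as the shorter-lived of the two...
            longevity: self.longevity.min(other.longevity),
            // ...and propagated only if both sides allow it.
            propagate: self.propagate && other.propagate,
        }
    }
}

fn main() {
    let a = Validity {
        priority: 1,
        requires: vec![b"a".to_vec()],
        provides: vec![],
        longevity: 32,
        propagate: true,
    };
    let b = Validity {
        priority: 2,
        requires: vec![],
        provides: vec![b"b".to_vec()],
        longevity: 8,
        propagate: false,
    };
    let c = a.combine_with(b);
    assert_eq!(c.priority, 3);
    assert_eq!(c.requires.len(), 1);
    assert_eq!(c.provides.len(), 1);
    assert_eq!(c.longevity, 8);
    assert!(!c.propagate);
}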
diff --git a/substrate/primitives/serializer/src/lib.rs b/substrate/primitives/serializer/src/lib.rs
index 3aef9ef5a387356621d250ed17278dff83278b4e..ccdbbf27f179ba6efaf9c633adf1c6aa72ca1805 100644
--- a/substrate/primitives/serializer/src/lib.rs
+++ b/substrate/primitives/serializer/src/lib.rs
@@ -22,7 +22,7 @@
 
 #![warn(missing_docs)]
 
-pub use serde_json::{from_str, from_slice, from_reader, Result, Error};
+pub use serde_json::{from_reader, from_slice, from_str, Error, Result};
 
 const PROOF: &str = "Serializers are infallible; qed";
 
@@ -37,6 +37,9 @@ pub fn encode<T: serde::Serialize + ?Sized>(value: &T) -> Vec<u8> {
 }
 
 /// Serialize the given data structure as JSON into the IO stream.
-pub fn to_writer<W: std::io::Write, T: serde::Serialize + ?Sized>(writer: W, value: &T) -> Result<()> {
+pub fn to_writer<W: std::io::Write, T: serde::Serialize + ?Sized>(
+	writer: W,
+	value: &T,
+) -> Result<()> {
 	serde_json::to_writer(writer, value)
 }
diff --git a/substrate/primitives/session/src/lib.rs b/substrate/primitives/session/src/lib.rs
index 9f63d64d414b036a3802da815d19321c618f9dfb..22d6b0b4a5920591c0d298d40a120e49d82d0fd5 100644
--- a/substrate/primitives/session/src/lib.rs
+++ b/substrate/primitives/session/src/lib.rs
@@ -19,15 +19,14 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use codec::{Encode, Decode};
+use codec::{Decode, Encode};
 
-#[cfg(feature = "std")]
-use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 #[cfg(feature = "std")]
 use sp_api::ProvideRuntimeApi;
+#[cfg(feature = "std")]
+use sp_runtime::{generic::BlockId, traits::Block as BlockT};
 
-use sp_core::RuntimeDebug;
-use sp_core::crypto::KeyTypeId;
+use sp_core::{crypto::KeyTypeId, RuntimeDebug};
 use sp_staking::SessionIndex;
 use sp_std::vec::Vec;
diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs
index ab72ecda042c049b8de77d08d4bb1b16274cae27..b9afda41c5e7788a925eb02ffd382340c038104b 100644
--- a/substrate/primitives/staking/src/offence.rs
+++ b/substrate/primitives/staking/src/offence.rs
@@ -20,7 +20,7 @@
 
 use sp_std::vec::Vec;
 
-use codec::{Encode, Decode};
+use codec::{Decode, Encode};
 use sp_runtime::Perbill;
 
 use crate::SessionIndex;
@@ -84,10 +84,7 @@ pub trait Offence<Offender> {
 	///
 	/// `offenders_count` - the count of unique offending authorities. It is >0.
 	/// `validator_set_count` - the cardinality of the validator set at the time of offence.
-	fn slash_fraction(
-		offenders_count: u32,
-		validator_set_count: u32,
-	) -> Perbill;
+	fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill;
 }
 
 /// Errors that may happen on offence reports.
@@ -108,7 +105,7 @@ impl sp_runtime::traits::Printable for OffenceError {
 			Self::Other(e) => {
 				"Other".print();
 				e.print();
-			}
+			},
 		}
 	}
 }
diff --git a/substrate/primitives/state-machine/src/backend.rs b/substrate/primitives/state-machine/src/backend.rs
index 0dc054ed50390f95f97ade808aca9911de3548e6..de4ff33b51fe8bd06d128ecd4ad5045e0b4ef138 100644
--- a/substrate/primitives/state-machine/src/backend.rs
+++ b/substrate/primitives/state-machine/src/backend.rs
@@ -17,19 +17,16 @@
 
 //! State machine backends. These manage the code and storage of contracts.
-use hash_db::Hasher; -use codec::{Decode, Encode}; -use sp_core::{ - storage::{ChildInfo, well_known_keys, TrackedStorageKey} -}; use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, ChildStorageCollection, + trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, + StorageCollection, StorageKey, StorageValue, UsageInfo, }; -use sp_std::vec::Vec; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; +use sp_std::vec::Vec; /// A state backend is used to read state data and can have changes committed /// to it. @@ -90,7 +87,7 @@ pub trait Backend: sp_std::fmt::Debug { fn next_child_storage_key( &self, child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result, Self::Error>; /// Iterate over storage starting at key, for a given prefix and child trie. @@ -128,7 +125,6 @@ pub trait Backend: sp_std::fmt::Debug { /// call `f` for each of those keys. fn for_key_values_with_prefix(&self, prefix: &[u8], f: F); - /// Retrieve all child entries keys which start with the given prefix and /// call `f` for each of those keys. fn for_child_keys_with_prefix( @@ -143,8 +139,10 @@ pub trait Backend: sp_std::fmt::Debug { /// Does not include child storage updates. fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord; + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. The second argument @@ -152,8 +150,10 @@ pub trait Backend: sp_std::fmt::Debug { fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; @@ -166,11 +166,7 @@ pub trait Backend: sp_std::fmt::Debug { } /// Get all keys of child storage with given prefix - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec { let mut all = Vec::new(); self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec())); all @@ -186,18 +182,19 @@ pub trait Backend: sp_std::fmt::Debug { /// Does include child storage updates. 
fn full_storage_root<'a>( &self, - delta: impl Iterator)>, - child_deltas: impl Iterator)>, - )>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { + delta: impl Iterator)>, + child_deltas: impl Iterator< + Item = (&'a ChildInfo, impl Iterator)>), + >, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord + Encode, + { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = - self.child_storage_root(&child_info, child_delta); + let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -206,13 +203,10 @@ pub trait Backend: sp_std::fmt::Debug { child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } - let (root, parent_txs) = self.storage_root(delta - .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) - .chain( - child_roots - .iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ) + let (root, parent_txs) = self.storage_root( + delta + .map(|(k, v)| (k, v.as_ref().map(|v| &v[..]))) + .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), ); txs.consolidate(parent_txs); (root, txs) @@ -286,10 +280,7 @@ impl Consolidate for () { } } -impl Consolidate for Vec<( - Option, - StorageCollection, - )> { +impl Consolidate for Vec<(Option, StorageCollection)> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } @@ -303,12 +294,15 @@ impl> Consolidate for sp_trie::GenericMem /// Insert input pairs into memory db. #[cfg(test)] -pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: I) -> Option - where - H: Hasher, - I: IntoIterator, +pub(crate) fn insert_into_memory_db( + mdb: &mut sp_trie::MemoryDB, + input: I, +) -> Option +where + H: Hasher, + I: IntoIterator, { - use sp_trie::{TrieMut, trie_types::TrieDBMut}; + use sp_trie::{trie_types::TrieDBMut, TrieMut}; let mut root = ::Out::default(); { @@ -316,7 +310,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { log::warn!(target: "trie", "Failed to write to trie: {}", e); - return None; + return None } } } @@ -332,8 +326,8 @@ pub struct BackendRuntimeCode<'a, B, H> { } #[cfg(feature = "std")] -impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for - BackendRuntimeCode<'a, B, H> +impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode + for BackendRuntimeCode<'a, B, H> { fn fetch_runtime_code<'b>(&'b self) -> Option> { self.backend.storage(well_known_keys::CODE).ok().flatten().map(Into::into) @@ -341,23 +335,27 @@ impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for } #[cfg(feature = "std")] -impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> where H::Out: Encode { +impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> +where + H::Out: Encode, +{ /// Create a new instance. pub fn new(backend: &'a B) -> Self { - Self { - backend, - _marker: std::marker::PhantomData, - } + Self { backend, _marker: std::marker::PhantomData } } /// Return the [`RuntimeCode`] build from the wrapped `backend`. pub fn runtime_code(&self) -> Result { - let hash = self.backend.storage_hash(well_known_keys::CODE) + let hash = self + .backend + .storage_hash(well_known_keys::CODE) .ok() .flatten() .ok_or("`:code` hash not found")? 
.encode(); - let heap_pages = self.backend.storage(well_known_keys::HEAP_PAGES) + let heap_pages = self + .backend + .storage(well_known_keys::HEAP_PAGES) .ok() .flatten() .and_then(|d| Decode::decode(&mut &d[..]).ok()); diff --git a/substrate/primitives/state-machine/src/basic.rs b/substrate/primitives/state-machine/src/basic.rs index 75b0c1c922e434ab0b6282977069d85a27c99a57..0bbd2d0a8e8e6e8168aeffe59877882784ce65f1 100644 --- a/substrate/primitives/state-machine/src/basic.rs +++ b/substrate/primitives/state-machine/src/basic.rs @@ -17,23 +17,25 @@ //! Basic implementation for Externalities. -use std::{ - collections::BTreeMap, any::{TypeId, Any}, iter::FromIterator, ops::Bound, -}; use crate::{Backend, StorageKey, StorageValue}; +use codec::Encode; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, empty_child_trie_root}; -use sp_trie::trie_types::Layout; +use log::warn; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, Storage, - ChildInfo, StorageChild, TrackedStorageKey, + well_known_keys::is_child_storage_key, ChildInfo, Storage, StorageChild, TrackedStorageKey, }, - traits::Externalities, Blake2Hasher, + traits::Externalities, + Blake2Hasher, +}; +use sp_externalities::{Extension, Extensions}; +use sp_trie::{empty_child_trie_root, trie_types::Layout, TrieConfiguration}; +use std::{ + any::{Any, TypeId}, + collections::BTreeMap, + iter::FromIterator, + ops::Bound, }; -use log::warn; -use codec::Encode; -use sp_externalities::{Extensions, Extension}; /// Simple Map-based Externalities impl. #[derive(Debug)] @@ -105,13 +107,13 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) - && self.inner.children_default.eq(&other.inner.children_default) + self.inner.top.eq(&other.inner.top) && + self.inner.children_default.eq(&other.inner.children_default) } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { - fn from_iter>(iter: I) -> Self { + fn from_iter>(iter: I) -> Self { let mut t = Self::default(); t.inner.top.extend(iter); t @@ -119,16 +121,15 @@ impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { } impl Default for BasicExternalities { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { BasicExternalities { - inner: Storage { - top: hashmap, - children_default: Default::default(), - }, + inner: Storage { top: hashmap, children_default: Default::default() }, extensions: Default::default(), } } @@ -145,20 +146,15 @@ impl Externalities for BasicExternalities { self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - self.inner.children_default.get(child_info.storage_key()) - .and_then(|child| child.data.get(key)).cloned() + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + self.inner + .children_default + .get(child_info.storage_key()) + .and_then(|child| child.data.get(key)) + .cloned() } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } @@ -167,25 +163,27 @@ impl Externalities for BasicExternalities { self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() } - 
fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children_default.get(child_info.storage_key()) + self.inner + .children_default + .get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); - return; + return } match maybe_value { - Some(value) => { self.inner.top.insert(key, value); } - None => { self.inner.top.remove(&key); } + Some(value) => { + self.inner.top.insert(key, value); + }, + None => { + self.inner.top.remove(&key); + }, } } @@ -195,7 +193,10 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) + let child_map = self + .inner + .children_default + .entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -207,12 +208,13 @@ impl Externalities for BasicExternalities { } } - fn kill_child_storage( - &mut self, - child_info: &ChildInfo, - _limit: Option, - ) -> (bool, u32) { - let num_removed = self.inner.children_default.remove(child_info.storage_key()).map(|c| c.data.len()).unwrap_or(0); + fn kill_child_storage(&mut self, child_info: &ChildInfo, _limit: Option) -> (bool, u32) { + let num_removed = self + .inner + .children_default + .remove(child_info.storage_key()) + .map(|c| c.data.len()) + .unwrap_or(0); (true, num_removed as u32) } @@ -222,10 +224,13 @@ impl Externalities for BasicExternalities { target: "trie", "Refuse to clear prefix that is part of child storage key via main storage" ); - return (false, 0); + return (false, 0) } - let to_remove = self.inner.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + let to_remove = self + .inner + .top + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() @@ -245,7 +250,9 @@ impl Externalities for BasicExternalities { _limit: Option, ) -> (bool, u32) { if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { - let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + let to_remove = child + .data + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) .cloned() @@ -261,20 +268,19 @@ impl Externalities for BasicExternalities { } } - fn storage_append( - &mut self, - key: Vec, - value: Vec, - ) { + fn storage_append(&mut self, key: Vec, value: Vec) { let current = self.inner.top.entry(key).or_default(); crate::ext::StorageAppend::new(current).append(value); } fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { - (v.child_info.prefixed_storage_key(), v.child_info.clone()) - }).collect(); + let prefixed_keys: Vec<_> = self + .inner + .children_default + .iter() + .map(|(_k, v)| (v.child_info.prefixed_storage_key(), v.child_info.clone())) + .collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. 
Using null storage key until multiple // type of child trie support. @@ -291,17 +297,16 @@ impl Externalities for BasicExternalities { Layout::::trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta).0 + .child_storage_root(&child.child_info, delta) + .0 } else { empty_child_trie_root::>() - }.encode() + } + .encode() } fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { @@ -358,7 +363,10 @@ impl sp_externalities::ExtensionStore for BasicExternalities { self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { @@ -370,10 +378,11 @@ impl sp_externalities::ExtensionStore for BasicExternalities { #[cfg(test)] mod tests { use super::*; - use sp_core::map; - use sp_core::storage::{Storage, StorageChild}; - use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; + use sp_core::{ + map, + storage::{well_known_keys::CODE, Storage, StorageChild}, + }; #[test] fn commit_should_work() { @@ -381,7 +390,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); assert_eq!(&ext.storage_root()[..], &ROOT); } @@ -407,7 +417,7 @@ mod tests { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: child_info.to_owned(), } - ] + ], }); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -437,10 +447,9 @@ mod tests { ], child_info: child_info.to_owned(), } - ] + ], }); - let res = ext.kill_child_storage(child_info, None); assert_eq!(res, (true, 3)); } diff --git a/substrate/primitives/state-machine/src/changes_trie/build.rs b/substrate/primitives/state-machine/src/changes_trie/build.rs index 38d1ab714e7f852420ca744de5e6c80def15decf..2c75ac236bf33d1aa6c41da7c5229f6273f948d6 100644 --- a/substrate/primitives/state-machine/src/changes_trie/build.rs +++ b/substrate/primitives/state-machine/src/changes_trie/build.rs @@ -17,23 +17,22 @@ //! Structures and functions required to build changes trie for given block. 
-use std::collections::BTreeMap; -use std::collections::btree_map::Entry; -use codec::{Decode, Encode}; -use hash_db::Hasher; -use num_traits::One; use crate::{ - StorageKey, backend::Backend, - overlayed_changes::{OverlayedChanges, OverlayedValue}, - trie_backend_essence::TrieBackendEssence, changes_trie::{ - AnchorBlockId, ConfigurationRange, Storage, BlockNumber, build_iterator::digest_build_iterator, - input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, + input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, + AnchorBlockId, BlockNumber, ConfigurationRange, Storage, }, + overlayed_changes::{OverlayedChanges, OverlayedValue}, + trie_backend_essence::TrieBackendEssence, + StorageKey, }; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use num_traits::One; use sp_core::storage::{ChildInfo, PrefixedStorageKey}; +use std::collections::{btree_map::Entry, BTreeMap}; /// Prepare input pairs for building a changes trie of given block. /// @@ -45,66 +44,59 @@ pub(crate) fn prepare_input<'a, B, H, Number>( config: ConfigurationRange<'a, Number>, overlay: &'a OverlayedChanges, parent: &'a AnchorBlockId, -) -> Result<( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, +) -> Result< + ( + impl Iterator> + 'a, + Vec<(ChildIndex, impl Iterator> + 'a)>, Vec, - ), String> - where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + H::Out: Encode, + Number: BlockNumber, { let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = prepare_extrinsics_input( - backend, - &number, - overlay, - )?; - let (digest_input, mut children_digest_input, digest_input_blocks) = prepare_digest_input::( - parent, - config, - number, - storage, - )?; + let (extrinsics_input, children_extrinsics_input) = + prepare_extrinsics_input(backend, &number, overlay)?; + let (digest_input, mut children_digest_input, digest_input_blocks) = + prepare_digest_input::(parent, config, number, storage)?; let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); for (child_index, ext_iter) in children_extrinsics_input.into_iter() { let dig_iter = children_digest_input.remove(&child_index); children_digest.push(( child_index, - Some(ext_iter).into_iter().flatten() - .chain(dig_iter.into_iter().flatten()), + Some(ext_iter).into_iter().flatten().chain(dig_iter.into_iter().flatten()), )); } for (child_index, dig_iter) in children_digest_input.into_iter() { children_digest.push(( child_index, - None.into_iter().flatten() - .chain(Some(dig_iter).into_iter().flatten()), + None.into_iter().flatten().chain(Some(dig_iter).into_iter().flatten()), )); } - Ok(( - extrinsics_input.chain(digest_input), - children_digest, - digest_input_blocks, - )) + Ok((extrinsics_input.chain(digest_input), children_digest, digest_input_blocks)) } /// Prepare ExtrinsicIndex input pairs. 
fn prepare_extrinsics_input<'a, B, H, Number>( backend: &'a B, block: &Number, overlay: &'a OverlayedChanges, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), String> - where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + Number: BlockNumber, { let mut children_result = BTreeMap::new(); @@ -115,7 +107,9 @@ fn prepare_extrinsics_input<'a, B, H, Number>( }; let iter = prepare_extrinsics_input_inner( - backend, block, overlay, + backend, + block, + overlay, Some(child_info.clone()), child_changes, )?; @@ -132,12 +126,12 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( block: &Number, overlay: &'a OverlayedChanges, child_info: Option, - changes: impl Iterator -) -> Result> + 'a, String> - where - B: Backend, - H: Hasher, - Number: BlockNumber, + changes: impl Iterator, +) -> Result> + 'a, String> +where + B: Backend, + H: Hasher, + Number: BlockNumber, { changes .filter_map(|(k, v)| { @@ -148,68 +142,79 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( None } }) - .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of operation - // AND are not in storage at the beginning of operation - if let Some(child_info) = child_info.as_ref() { - if !overlay.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? { - return Ok(map); + .try_fold( + BTreeMap::new(), + |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { + match map.entry(k) { + Entry::Vacant(entry) => { + // ignore temporary values (values that have null value at the end of operation + // AND are not in storage at the beginning of operation + if let Some(child_info) = child_info.as_ref() { + if !overlay + .child_storage(child_info, k) + .map(|v| v.is_some()) + .unwrap_or_default() + { + if !backend + .exists_child_storage(&child_info, k) + .map_err(|e| format!("{}", e))? + { + return Ok(map) + } } - } - } else { - if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? { - return Ok(map); + } else { + if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ + return Ok(map) + } } - } - }; - - let extrinsics = extrinsics.into_iter().collect(); - entry.insert((ExtrinsicIndex { - block: block.clone(), - key: k.to_vec(), - }, extrinsics)); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is Occupied - // AND we are checking it before insertion - let entry_extrinsics = &mut entry.get_mut().1; - entry_extrinsics.extend( - extrinsics.into_iter() - ); - entry_extrinsics.sort(); - }, - } + }; - Ok(map) - }) + let extrinsics = extrinsics.into_iter().collect(); + entry.insert(( + ExtrinsicIndex { block: block.clone(), key: k.to_vec() }, + extrinsics, + )); + }, + Entry::Occupied(mut entry) => { + // we do not need to check for temporary values here, because entry is Occupied + // AND we are checking it before insertion + let entry_extrinsics = &mut entry.get_mut().1; + entry_extrinsics.extend(extrinsics.into_iter()); + entry_extrinsics.sort(); + }, + } + + Ok(map) + }, + ) .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) } - /// Prepare DigestIndex input pairs. fn prepare_digest_input<'a, H, Number>( parent: &'a AnchorBlockId, config: ConfigurationRange, block: Number, storage: &'a dyn Storage, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, Vec, - ), String> - where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, + ), + String, +> +where + H: Hasher, + H::Out: 'a + Encode, + Number: BlockNumber, { let build_skewed_digest = config.end.as_ref() == Some(&block); let block_for_digest = if build_skewed_digest { - config.config.next_max_level_digest_range(config.zero.clone(), block.clone()) + config + .config + .next_max_level_digest_range(config.zero.clone(), block.clone()) .map(|(_, end)| end) .unwrap_or_else(|| block.clone()) } else { @@ -217,128 +222,158 @@ fn prepare_digest_input<'a, H, Number>( }; let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks.clone().into_iter() + digest_input_blocks + .clone() + .into_iter() .try_fold( - (BTreeMap::new(), BTreeMap::new()), move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; - - let insert_to_map = |map: &mut BTreeMap<_,_>, key: StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert((DigestIndex { - block: block.clone(), - key, - }, vec![digest_build_block.clone()])); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. 
Here we are relying on the fact that digest_build_iterator() - // returns blocks in ascending order => we only need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest block - // AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = storage.with_cached_changed_keys( - &trie_root, - &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } + (BTreeMap::new(), BTreeMap::new()), + move |(mut map, mut child_map), digest_build_block| { + let extrinsic_prefix = + ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); + let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); + let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); + let trie_root = storage.root(parent, digest_build_block.clone())?; + let trie_root = trie_root.ok_or_else(|| { + format!("No changes trie root for block {}", digest_build_block.clone()) + })?; + + let insert_to_map = |map: &mut BTreeMap<_, _>, key: StorageKey| { + match map.entry(key.clone()) { + Entry::Vacant(entry) => { + entry.insert(( + DigestIndex { block: block.clone(), key }, + vec![digest_build_block.clone()], + )); + }, + Entry::Occupied(mut entry) => { + // DigestIndexValue must be sorted. Here we are relying on the fact that digest_build_iterator() + // returns blocks in ascending order => we only need to check for duplicates + // + // is_dup_block could be true when key has been changed in both digest block + // AND other blocks that it covers + let is_dup_block = entry.get().1.last() == Some(&digest_build_block); + if !is_dup_block { + entry.get_mut().1.push(digest_build_block.clone()); + } + }, } + }; + + // try to get all updated keys from cache + let populated_from_cache = + storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { + for (storage_key, changed_keys) in changed_keys { + let map = match storage_key { + Some(storage_key) => child_map + .entry(ChildIndex:: { + block: block.clone(), + storage_key: storage_key.clone(), + }) + .or_default(), + None => &mut map, + }; + for changed_key in changed_keys.iter().cloned() { + insert_to_map(map, changed_key); + } + } + }); + if populated_from_cache { + return Ok((map, child_map)) } - ); - if populated_from_cache { - return Ok((map, child_map)); - } - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| - if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut key) { - if let Ok(value) = >::decode(&mut value) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); + let mut children_roots = BTreeMap::::new(); + { + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + + 
trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { + if let Ok(InputKey::ChildIndex::(trie_key)) = + Decode::decode(&mut key) + { + if let Ok(value) = >::decode(&mut value) { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&value[..]); + children_roots.insert(trie_key.storage_key, trie_root); + } } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut key) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut key) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - } + } - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key, - }; + for (storage_key, trie_root) in children_roots.into_iter() { + let child_index = ChildIndex:: { block: block.clone(), storage_key }; - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut key) { - insert_to_map(&mut map, trie_key.key); + let mut map = child_map.entry(child_index).or_default(); + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut key) { - insert_to_map(&mut map, trie_key.key); + trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut key) + { + insert_to_map(&mut map, trie_key.key); + } }); - } - Ok((map, child_map)) + } + Ok((map, child_map)) + }, + ) + .map(|(pairs, child_pairs)| { + ( + pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), + child_pairs + .into_iter() + .map(|(sk, pairs)| { + (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) + }) + .collect(), + digest_input_blocks, + ) }) - .map(|(pairs, child_pairs)| ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs.into_iter().map(|(sk, pairs)| - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)))).collect(), - digest_input_blocks, - )) } #[cfg(test)] mod test { - use sp_core::Blake2Hasher; - use crate::InMemoryBackend; - use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; - use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use super::*; + use crate::{ + changes_trie::{ + build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, + storage::InMemoryStorage, + 
Configuration, RootsStorage, + }, + InMemoryBackend, + }; + use sp_core::Blake2Hasher; - fn prepare_for_build(zero: u64) -> ( + fn prepare_for_build( + zero: u64, + ) -> ( InMemoryBackend, InMemoryStorage, OverlayedChanges, @@ -353,57 +388,150 @@ mod test { (vec![103], vec![255]), (vec![104], vec![255]), (vec![105], vec![255]), - ].into_iter().collect::>().into(); + ] + .into_iter() + .collect::>() + .into(); let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); - let storage = InMemoryStorage::with_inputs(vec![ - (zero + 1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![100] }, vec![0]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![105] }, vec![1]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]), - (zero + 5, Vec::new()), - (zero + 6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 6, key: vec![105] }, vec![2]), - ]), - (zero + 7, Vec::new()), - (zero + 8, vec![ - InputPair::DigestIndex(DigestIndex { block: zero + 8, key: vec![105] }, vec![zero + 6]), - ]), - (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), - (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], vec![(prefixed_child_trie_key1.clone(), vec![ - (zero + 1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - ]), - ]), - ]); + let storage = InMemoryStorage::with_inputs( + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![100] }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![101] }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![105] }, + vec![0, 2, 4], + ), + ], + ), + ( + zero + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0], + )], + ), + ( + zero + 3, + vec![ + 
InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 3, key: vec![100] }, + vec![0], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 3, key: vec![105] }, + vec![1], + ), + ], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3], + ), + ], + ), + (zero + 5, Vec::new()), + ( + zero + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 6, key: vec![105] }, + vec![2], + )], + ), + (zero + 7, Vec::new()), + ( + zero + 8, + vec![InputPair::DigestIndex( + DigestIndex { block: zero + 8, key: vec![105] }, + vec![zero + 6], + )], + ), + (zero + 9, Vec::new()), + (zero + 10, Vec::new()), + (zero + 11, Vec::new()), + (zero + 12, Vec::new()), + (zero + 13, Vec::new()), + (zero + 14, Vec::new()), + (zero + 15, Vec::new()), + ], + vec![( + prefixed_child_trie_key1.clone(), + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![100] }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![101] }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 1, key: vec![105] }, + vec![0, 2, 4], + ), + ], + ), + ( + zero + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0], + )], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 2, key: vec![102] }, + vec![0, 3], + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2], + ), + ], + ), + ], + )], + ); let mut changes = OverlayedChanges::default(); changes.set_collect_extrinsics(true); @@ -446,12 +574,11 @@ mod test { (backend, storage, changes, config) } - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -467,24 +594,48 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![103] }, vec![0, 1]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), - ]), - (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, - vec![ - 
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), - ]), - ]); - + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![103] }, + vec![0, 1] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, + vec![0, 2, 3] + ),] + ), + ( + ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 5, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -505,33 +656,82 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, 
v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -552,31 +752,74 @@ mod test { configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - ]), - (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), - ]), - ]); + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![100] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![101] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![102] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![103] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![105] }, + vec![zero + 4, zero + 8] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, + 
vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 16, key: vec![102] }, + vec![zero + 4] + ), + ] + ), + ( + ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 16, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -591,38 +834,67 @@ mod test { let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range.clone(), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - ]); + let changes_trie_nodes = + prepare_input(&backend, &storage, configuration_range.clone(), &changes, &parent) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![103] }, + vec![0, 1] + ), + ] + ); configuration_range.end = Some(zero + 11); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range, - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); + let changes_trie_nodes = + prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 11, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![100] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![101] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![102] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![103] }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 11, key: vec![105] }, + vec![zero + 4, zero + 8] + ), + ] + ); } test_with_zero(0); @@ -647,34 +919,82 @@ mod test { 
configuration_range(&config, zero), &changes, &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); - + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![101] }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![103] }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![100] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![101] }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![102] }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { block: zero + 4, key: vec![105] }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: zero + 4, key: vec![100] }, + vec![0, 2] + ),] + ), + ] + ); } test_with_zero(0); @@ -710,44 +1030,50 @@ mod test { .complete(4, &trie_root4); storage.cache_mut().perform(cached_data4); - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = 
prepare_input( - &backend, - &storage, - configuration_range(&config, 0), - &changes, - &parent, - ).unwrap(); - assert_eq!(root_changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ]); + let (root_changes_trie_nodes, child_changes_tries_nodes, _) = + prepare_input(&backend, &storage, configuration_range(&config, 0), &changes, &parent) + .unwrap(); + assert_eq!( + root_changes_trie_nodes.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16, key: vec![100] }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), + ] + ); let child_changes_tries_nodes = child_changes_tries_nodes .into_iter() .map(|(k, i)| (k, i.collect::>())) .collect::>(); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { - block: 16u64, - storage_key: child_trie_key1.clone(), - }).unwrap(), + child_changes_tries_nodes + .get(&ChildIndex { block: 16u64, storage_key: child_trie_key1.clone() }) + .unwrap(), &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), - + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16u64, key: vec![100] }, + vec![0, 2, 3] + ), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![103] }, vec![4]), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(), + child_changes_tries_nodes + .get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }) + .unwrap(), &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), - + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16u64, key: vec![100] }, + vec![0, 2] + ), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), ], diff --git a/substrate/primitives/state-machine/src/changes_trie/build_cache.rs b/substrate/primitives/state-machine/src/changes_trie/build_cache.rs index 9b2190ae1951fa934b52b59cb6dcb6bd2bc27508..67098d4d72040e882e945907a3febb12bed479af 100644 --- a/substrate/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/substrate/primitives/state-machine/src/changes_trie/build_cache.rs @@ -78,20 +78,20 @@ pub(crate) struct IncompleteCachedBuildData { } impl BuildCache - where - N: Eq + ::std::hash::Hash, - H: Eq + ::std::hash::Hash + Clone, +where + N: Eq + ::std::hash::Hash, + H: Eq + ::std::hash::Hash + Clone, { /// Create new changes trie build cache. 
pub fn new() -> Self { - BuildCache { - roots_by_number: HashMap::new(), - changed_keys: HashMap::new(), - } + BuildCache { roots_by_number: HashMap::new(), changed_keys: HashMap::new() } } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + pub fn get( + &self, + root: &H, + ) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -158,7 +158,9 @@ impl IncompleteCacheAction { pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { match self { IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.set_digest_input_blocks(digest_input_blocks)), + IncompleteCacheAction::CacheBuildData( + build_data.set_digest_input_blocks(digest_input_blocks), + ), IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, } } @@ -180,10 +182,7 @@ impl IncompleteCacheAction { impl IncompleteCachedBuildData { /// Create new cached data. pub(crate) fn new() -> Self { - IncompleteCachedBuildData { - digest_input_blocks: Vec::new(), - changed_keys: HashMap::new(), - } + IncompleteCachedBuildData { digest_input_blocks: Vec::new(), changed_keys: HashMap::new() } } fn complete(self, block: N, trie_root: H) -> CachedBuildData { @@ -232,30 +231,42 @@ mod tests { #[test] fn obsolete_entries_are_purged_when_new_ct_is_built() { let mut cache = BuildCache::::new(); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![1]].into_iter().collect()) - .complete(1, 1))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![2]].into_iter().collect()) - .complete(2, 2))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![3]].into_iter().collect()) - .complete(3, 3))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![1]].into_iter().collect()) + .complete(1, 1), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![2]].into_iter().collect()) + .complete(2, 2), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![3]].into_iter().collect()) + .complete(3, 3), + )); assert_eq!(cache.changed_keys.len(), 3); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .set_digest_input_blocks(vec![1, 2, 3]) - .complete(4, 4))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .set_digest_input_blocks(vec![1, 2, 3]) + .complete(4, 4), + )); assert_eq!(cache.changed_keys.len(), 1); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![8]].into_iter().collect()) - .complete(8, 8))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![12]].into_iter().collect()) - .complete(12, 12))); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![8]].into_iter().collect()) + .complete(8, 8), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![12]].into_iter().collect()) + .complete(12, 12), + )); assert_eq!(cache.changed_keys.len(), 3); diff --git a/substrate/primitives/state-machine/src/changes_trie/build_iterator.rs b/substrate/primitives/state-machine/src/changes_trie/build_iterator.rs index 
43089d819b66d08d8d3a333de4446bb62ddc2a94..d4adc99d109fc4faa81bfdf2317853b8608cf83a 100644 --- a/substrate/primitives/state-machine/src/changes_trie/build_iterator.rs +++ b/substrate/primitives/state-machine/src/changes_trie/build_iterator.rs @@ -18,8 +18,8 @@ //! Structures and functions to return blocks whose changes are to be included //! in given block's changes trie. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::Zero; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns iterator of OTHER blocks that are required for inclusion into /// changes trie of given block. Blocks are guaranteed to be returned in @@ -31,13 +31,19 @@ pub fn digest_build_iterator<'a, Number: BlockNumber>( block: Number, ) -> DigestBuildIterator { // prepare digest build parameters - let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) { + let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) + { Some((current_level, digest_interval, digest_step)) => (current_level, digest_interval, digest_step), None => return DigestBuildIterator::empty(), }; - DigestBuildIterator::new(block.clone(), config.end.unwrap_or(block), config.config.digest_interval, digest_step) + DigestBuildIterator::new( + block.clone(), + config.end.unwrap_or(block), + config.config.digest_interval, + digest_step, + ) } /// Changes trie build iterator that returns numbers of OTHER blocks that are @@ -56,7 +62,6 @@ pub struct DigestBuildIterator { max_step: u32, // Mutable data below: - /// Step of current blocks range. current_step: u32, /// Reverse step of current blocks range. @@ -98,7 +103,7 @@ impl Iterator for DigestBuildIterator { if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { if next < self.end { self.last_block = Some(next.clone()); - return Some(next); + return Some(next) } } @@ -112,14 +117,16 @@ impl Iterator for DigestBuildIterator { self.current_step_reverse * self.digest_interval }; if next_step_reverse > self.max_step { - return None; + return None } self.current_step_reverse = next_step_reverse; self.current_range = Some(BlocksRange::new( match self.last_block.clone() { Some(last_block) => last_block + self.current_step.into(), - None => self.block.clone() - (self.current_step * self.digest_interval - self.current_step).into(), + None => + self.block.clone() - + (self.current_step * self.digest_interval - self.current_step).into(), }, self.block.clone(), self.current_step.into(), @@ -143,11 +150,7 @@ struct BlocksRange { impl BlocksRange { pub fn new(begin: Number, end: Number, step: Number) -> Self { - BlocksRange { - current: begin, - end, - step, - } + BlocksRange { current: begin, end, step } } } @@ -156,7 +159,7 @@ impl Iterator for BlocksRange { fn next(&mut self) -> Option { if self.current >= self.end { - return None; + return None } let current = Some(self.current.clone()); @@ -167,8 +170,8 @@ impl Iterator for BlocksRange { #[cfg(test)] mod tests { - use crate::changes_trie::Configuration; use super::*; + use crate::changes_trie::Configuration; fn digest_build_iterator( digest_interval: u32, @@ -179,10 +182,7 @@ mod tests { ) -> DigestBuildIterator { super::digest_build_iterator( ConfigurationRange { - config: &Configuration { - digest_interval, - digest_levels, - }, + config: &Configuration { digest_interval, digest_levels }, zero, end, }, @@ -215,9 +215,21 @@ mod tests { fn test_with_zero(zero: u64) { let empty = (0, 0, 0); 
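
`BlocksRange`, reformatted just above, is the core of `DigestBuildIterator`: a half-open `[begin, end)` range walked with a fixed step, where the step depends on the digest level being described. A standalone sketch of the same idea, with concrete `u64` values in place of the generic `Number`; the values in `main` assume `digest_interval = 4`, matching the small test configurations in this file:

// Half-open [current, end) range traversed with a fixed step.
struct StepRange {
	current: u64,
	end: u64,
	step: u64,
}

impl Iterator for StepRange {
	type Item = u64;

	fn next(&mut self) -> Option<u64> {
		if self.current >= self.end {
			return None
		}
		let current = self.current;
		self.current += self.step;
		Some(current)
	}
}

fn main() {
	// A level-2 digest at block 16 points at the level-1 digests 4, 8 and 12...
	let level1_digests: Vec<u64> = StepRange { current: 4, end: 16, step: 4 }.collect();
	assert_eq!(level1_digests, vec![4, 8, 12]);
	// ...and then directly at the plain blocks 13, 14 and 15.
	let plain_blocks: Vec<u64> = StepRange { current: 13, end: 16, step: 1 }.collect();
	assert_eq!(plain_blocks, vec![13, 14, 15]);
}
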
assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); - assert_eq!(digest_build_iterator_basic(0, 16, zero, zero + 64), empty, "digest_interval is 0"); - assert_eq!(digest_build_iterator_basic(1, 16, zero, zero + 64), empty, "digest_interval is 1"); - assert_eq!(digest_build_iterator_basic(4, 0, zero, zero + 64), empty, "digest_levels is 0"); + assert_eq!( + digest_build_iterator_basic(0, 16, zero, zero + 64), + empty, + "digest_interval is 0" + ); + assert_eq!( + digest_build_iterator_basic(1, 16, zero, zero + 64), + empty, + "digest_interval is 1" + ); + assert_eq!( + digest_build_iterator_basic(4, 0, zero, zero + 64), + empty, + "digest_levels is 0" + ); assert_eq!( digest_build_iterator_basic(4, 16, zero, zero + 1), empty, @@ -238,12 +250,11 @@ mod tests { empty, "digest is not required for this block", ); - assert_eq!(digest_build_iterator_basic( - ::std::u32::MAX / 2 + 1, - 16, - zero, - ::std::u64::MAX, - ), empty, "digest_interval * 2 is greater than u64::MAX"); + assert_eq!( + digest_build_iterator_basic(::std::u32::MAX / 2 + 1, 16, zero, ::std::u64::MAX,), + empty, + "digest_interval * 2 is greater than u64::MAX" + ); } test_with_zero(0); @@ -326,18 +337,37 @@ mod tests { #[test] fn digest_iterator_returns_level1_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16, None), + assert_eq!( + digest_build_iterator_blocks(16, 1, zero, zero + 16, None), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256, None), + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 1, zero, zero + 256, None), [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32, None), + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 32, None), [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079] - .iter().map(|item| zero + item).collect::>()); + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), + [ + 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, + 4078, 4079 + ] + .iter() + .map(|item| zero + item) + .collect::>() + ); } test_with_zero(0); @@ -348,21 +378,30 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256, None), + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 256, None), [ // level2 points to previous 16-1 level1 digests: 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, // level2 is a level1 digest of 16-1 previous blocks: 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), [ // level2 
points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level2 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -374,15 +413,20 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), [ // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, - // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), + 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, + 3840, // level3 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level3 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -394,7 +438,8 @@ mod tests { #[test] fn digest_iterator_returns_skewed_digest_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), [ // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: 256, 512, 768, 1024, 1280, @@ -402,7 +447,10 @@ mod tests { 1296, 1312, 1328, // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 9: 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } @@ -414,14 +462,18 @@ mod tests { #[test] fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), [ // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: 256, 512, 768, 1024, 1280, // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY L1-digests: // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 3: 1281, 1282, 1283, - ].iter().map(|item| zero + item).collect::>(), + ] + .iter() + .map(|item| zero + item) + .collect::>(), ); } diff --git a/substrate/primitives/state-machine/src/changes_trie/changes_iterator.rs b/substrate/primitives/state-machine/src/changes_trie/changes_iterator.rs index be35581e7514dc34fbb15167f4550e8be4fc440c..8b7d7c5781091f3466deb70a6d880f74f57a9ea3 100644 --- 
a/substrate/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/substrate/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -18,20 +18,22 @@ //! Functions + iterator that traverses changes tries and returns all //! (block, extrinsic) pairs where given key has been changed. -use std::cell::RefCell; -use std::collections::VecDeque; -use codec::{Decode, Encode, Codec}; +use crate::{ + changes_trie::{ + input::{ChildIndex, DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue}, + storage::{InMemoryStorage, TrieBackendAdapter}, + surface_iterator::{surface_iterator, SurfaceIterator}, + AnchorBlockId, BlockNumber, ConfigurationRange, RootsStorage, Storage, + }, + proving_backend::ProvingBackendRecorder, + trie_backend_essence::TrieBackendEssence, +}; +use codec::{Codec, Decode, Encode}; use hash_db::Hasher; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; -use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; -use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; -use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; -use crate::changes_trie::input::ChildIndex; -use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; -use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::{TrieBackendEssence}; +use std::{cell::RefCell, collections::VecDeque}; /// Return changes of given key at given blocks range. /// `max` is the number of best known block. @@ -57,12 +59,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), @@ -72,7 +69,6 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( }) } - /// Returns proof of changes of given key at given blocks range. /// `max` is the number of best known block. 
pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( @@ -83,7 +79,10 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( max: Number, storage_key: Option<&PrefixedStorageKey>, key: &[u8], -) -> Result>, String> where H::Out: Codec { +) -> Result>, String> +where + H::Out: Codec, +{ // we can't query any roots before root let max = std::cmp::min(max, end.number.clone()); @@ -96,12 +95,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), @@ -130,8 +124,11 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( end: &AnchorBlockId, max: Number, storage_key: Option<&PrefixedStorageKey>, - key: &[u8] -) -> Result, String> where H::Out: Encode { + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ key_changes_proof_check_with_db( config, roots_storage, @@ -153,8 +150,11 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( end: &AnchorBlockId, max: Number, storage_key: Option<&PrefixedStorageKey>, - key: &[u8] -) -> Result, String> where H::Out: Encode { + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ // we can't query any roots before root let max = std::cmp::min(max, end.number.clone()); @@ -167,28 +167,24 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( begin: begin.clone(), end, config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, + surface: surface_iterator(config, max, begin, end.number.clone())?, extrinsics: Default::default(), blocks: Default::default(), _hasher: ::std::marker::PhantomData::::default(), }, - }.collect() + } + .collect() } /// Drilldown iterator - receives 'digest points' from surface iterator and explores /// every point until extrinsic is found. pub struct DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], @@ -206,14 +202,14 @@ pub struct DrilldownIteratorEssence<'a, H, Number> } impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { match self.do_next(trie_reader) { Ok(Some(res)) => Some(Ok(res)), @@ -223,25 +219,26 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> } fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { loop { if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))); + return Ok(Some((block, extrinsic))) } if let Some((block, level)) = self.blocks.pop_front() { // not having a changes trie root is an error because: // we never query roots for future blocks // AND trie roots for old blocks are known (both on full + light node) - let trie_root = self.roots_storage.root(&self.end, block.clone())? 
- .ok_or_else(|| format!("Changes trie root for block {} is not found", block.clone()))?; + let trie_root = + self.roots_storage.root(&self.end, block.clone())?.ok_or_else(|| { + format!("Changes trie root for block {} is not found", block.clone()) + })?; let trie_root = if let Some(storage_key) = self.storage_key { - let child_key = ChildIndex { - block: block.clone(), - storage_key: storage_key.clone(), - }.encode(); + let child_key = + ChildIndex { block: block.clone(), storage_key: storage_key.clone() } + .encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? .and_then(|v| >::decode(&mut &v[..]).ok()) .map(|v| { @@ -251,7 +248,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> }) { trie_root } else { - continue; + continue } } else { trie_root @@ -260,18 +257,24 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> // only return extrinsics for blocks before self.max // most of blocks will be filtered out before pushing to `self.blocks` // here we just throwing away changes at digest blocks we're processing - debug_assert!(block >= self.begin, "We shall not touch digests earlier than a range' begin"); + debug_assert!( + block >= self.begin, + "We shall not touch digests earlier than a range' begin" + ); if block <= self.end.number { - let extrinsics_key = ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); + let extrinsics_key = + ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); if let Some(extrinsics) = extrinsics? { if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { - self.extrinsics.extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); + self.extrinsics + .extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); } } } - let blocks_key = DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); + let blocks_key = + DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); let blocks = trie_reader(self.storage, trie_root, &blocks_key); if let Some(blocks) = blocks? { if let Ok(blocks) = >::decode(&mut &blocks[..]) { @@ -280,23 +283,35 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> let begin = self.begin.clone(); let end = self.end.number.clone(); let config = self.config.clone(); - self.blocks.extend(blocks.into_iter() - .rev() - .filter(|b| level.map(|level| level > 1).unwrap_or(true) || (*b >= begin && *b <= end)) - .map(|b| { - let prev_level = level - .map(|level| Some(level - 1)) - .unwrap_or_else(|| - Some(config.config.digest_level_at_block(config.zero.clone(), b.clone()) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()))); - (b, prev_level) - }) + self.blocks.extend( + blocks + .into_iter() + .rev() + .filter(|b| { + level.map(|level| level > 1).unwrap_or(true) || + (*b >= begin && *b <= end) + }) + .map(|b| { + let prev_level = + level.map(|level| Some(level - 1)).unwrap_or_else(|| { + Some( + config + .config + .digest_level_at_block( + config.zero.clone(), + b.clone(), + ) + .map(|(level, _, _)| level) + .unwrap_or_else(|| Zero::zero()), + ) + }); + (b, prev_level) + }), ); } } - continue; + continue } match self.surface.next() { @@ -310,46 +325,50 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> /// Exploring drilldown operator. 
pub struct DrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> - where H::Out: Encode +where + H::Out: Encode, { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + self.essence.next(|storage, root, key| { + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) + }) } } /// Proving drilldown iterator. struct ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, proof_recorder: RefCell>, } impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { /// Consume the iterator, extracting the gathered proof in lexicographical order /// by value. pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() + self.proof_recorder + .into_inner() + .drain() .into_iter() .map(|n| n.data.to_vec()) .collect() @@ -357,32 +376,34 @@ impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> } impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a + Codec, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a + Codec, { type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { - let proof_recorder = &mut *self.proof_recorder.try_borrow_mut() + let proof_recorder = &mut *self + .proof_recorder + .try_borrow_mut() .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| + self.essence.next(|storage, root, key| { ProvingBackendRecorder::<_, H> { backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder, - }.storage(key)) + } + .storage(key) + }) } } #[cfg(test)] mod tests { - use std::iter::FromIterator; - use crate::changes_trie::Configuration; - use crate::changes_trie::input::InputPair; - use crate::changes_trie::storage::InMemoryStorage; - use sp_runtime::traits::BlakeTwo256; use super::*; + use crate::changes_trie::{input::InputPair, storage::InMemoryStorage, Configuration}; + use sp_runtime::traits::BlakeTwo256; + use std::iter::FromIterator; fn child_key() -> PrefixedStorageKey { let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); @@ -391,64 +412,98 @@ mod tests { fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs(vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![ - ]), - (2, vec![ - ]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![42] }, vec![0]), - ]), - (4, vec![ - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3]), - ]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![42] }, vec![3]), - ]), - (7, vec![]), - (8, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 8, key: vec![42] }, vec![1, 2]), - InputPair::DigestIndex(DigestIndex { 
block: 8, key: vec![42] }, vec![6]), - ]), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - (16, vec![ - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), - ]), - ], vec![(child_key(), vec![ - (1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), - ]), - (2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 2, key: vec![42] }, vec![3]), - ]), - (16, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![42] }, vec![5]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![2]), - ]), - ]), - ]); + let backend = InMemoryStorage::with_inputs( + vec![ + // digest: 1..4 => [(3, 0)] + (1, vec![]), + (2, vec![]), + ( + 3, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 3, key: vec![42] }, + vec![0], + )], + ), + (4, vec![InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3])]), + // digest: 5..8 => [(6, 3), (8, 1+2)] + (5, vec![]), + ( + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 6, key: vec![42] }, + vec![3], + )], + ), + (7, vec![]), + ( + 8, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 8, key: vec![42] }, + vec![1, 2], + ), + InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), + ], + ), + // digest: 9..12 => [] + (9, vec![]), + (10, vec![]), + (11, vec![]), + (12, vec![]), + // digest: 0..16 => [4, 8] + (13, vec![]), + (14, vec![]), + (15, vec![]), + ( + 16, + vec![InputPair::DigestIndex( + DigestIndex { block: 16, key: vec![42] }, + vec![4, 8], + )], + ), + ], + vec![( + child_key(), + vec![ + ( + 1, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 1, key: vec![42] }, + vec![0], + )], + ), + ( + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 2, key: vec![42] }, + vec![3], + )], + ), + ( + 16, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { block: 16, key: vec![42] }, + vec![5], + ), + InputPair::DigestIndex( + DigestIndex { block: 16, key: vec![42] }, + vec![2], + ), + ], + ), + ], + )], + ); (config, backend) } - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -462,7 +517,8 @@ mod tests { 16, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::( @@ -473,7 +529,8 @@ mod tests { 4, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::( @@ -484,7 +541,8 @@ mod tests { 4, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::( @@ -495,7 +553,8 @@ mod tests { 7, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); let drilldown_result = key_changes::( @@ -506,7 +565,8 @@ mod tests { 8, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); let drilldown_result 
= key_changes::( @@ -517,7 +577,8 @@ mod tests { 8, None, &[42], - ).and_then(Result::from_iter); + ) + .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); } @@ -534,7 +595,9 @@ mod tests { 1000, None, &[42], - ).and_then(|i| i.collect::, _>>()).is_err()); + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); assert!(key_changes::( configuration_range(&config, 0), @@ -544,7 +607,9 @@ mod tests { 1000, Some(&child_key()), &[42], - ).and_then(|i| i.collect::, _>>()).is_err()); + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); } #[test] @@ -558,7 +623,8 @@ mod tests { 50, None, &[42], - ).is_err()); + ) + .is_err()); assert!(key_changes::( configuration_range(&config, 0), &storage, @@ -567,10 +633,10 @@ mod tests { 100, None, &[42], - ).is_err()); + ) + .is_err()); } - #[test] fn proving_drilldown_iterator_works() { // happens on remote full node: @@ -578,13 +644,27 @@ mod tests { // create drilldown iterator that records all trie nodes during drilldown let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]).unwrap(); + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + None, + &[42], + ) + .unwrap(); let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof_child = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap(); + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + Some(&child_key()), + &[42], + ) + .unwrap(); // happens on local light node: @@ -592,14 +672,28 @@ mod tests { let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); let local_result = key_changes_proof_check::( - configuration_range(&local_config, 0), &local_storage, remote_proof, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]); + configuration_range(&local_config, 0), + &local_storage, + remote_proof, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + None, + &[42], + ); let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); let local_result_child = key_changes_proof_check::( - configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]); + configuration_range(&local_config, 0), + &local_storage, + remote_proof_child, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + Some(&child_key()), + &[42], + ); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); @@ -620,12 +714,22 @@ mod tests { // regular blocks: 89, 90, 91 let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); // changed at block#63 and covered by L3 digest at block#64 - input[63 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); - input[64 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); + input[63 - 1] + .1 + .push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); + input[64 - 
1]
+			.1
+			.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63]));
 		// changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91
-		input[79 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1]));
-		input[80 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79]));
-		input[91 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80]));
+		input[79 - 1]
+			.1
+			.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1]));
+		input[80 - 1]
+			.1
+			.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79]));
+		input[91 - 1]
+			.1
+			.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80]));
 		let storage = InMemoryStorage::with_inputs(input, vec![]);
 		let drilldown_result = key_changes::<BlakeTwo256, u64>(
@@ -636,7 +740,8 @@ mod tests {
 			100_000u64,
 			None,
 			&[42],
-		).and_then(Result::from_iter);
+		)
+		.and_then(Result::from_iter);
 		assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)]));
 	}
 }
diff --git a/substrate/primitives/state-machine/src/changes_trie/input.rs b/substrate/primitives/state-machine/src/changes_trie/input.rs
index 85a8de0b78d819983365671c7514932a22d04839..426104295611630b9888f1934d5ab76e045a5b24 100644
--- a/substrate/primitives/state-machine/src/changes_trie/input.rs
+++ b/substrate/primitives/state-machine/src/changes_trie/input.rs
@@ -17,11 +17,8 @@
 //! Different types of changes trie input pairs.
 
-use codec::{Decode, Encode, Input, Output, Error};
-use crate::{
-	StorageKey, StorageValue,
-	changes_trie::BlockNumber
-};
+use crate::{changes_trie::BlockNumber, StorageKey, StorageValue};
+use codec::{Decode, Encode, Error, Input, Output};
 use sp_core::storage::PrefixedStorageKey;
 
 /// Key of { changed key => set of extrinsic indices } mapping.
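The skewed-digest test above leans on the digest hierarchy its comments spell out: with a digest interval of 4 and three digest levels (inferred from the comments), the level-3 digest at block 64 summarizes blocks 1..=64, so it covers the change at #63, and the level-2 digest at block 80 summarizes blocks 65..=80, covering #79. A minimal sketch of the regular coverage arithmetic; digest_coverage is a hypothetical helper written for illustration, not an sp-state-machine API:

fn digest_coverage(digest_interval: u64, level: u32, digest_block: u64) -> std::ops::RangeInclusive<u64> {
	// A level-N digest summarizes digest_interval^N blocks ending at the digest block.
	let span = digest_interval.pow(level);
	(digest_block - span + 1)..=digest_block
}

fn main() {
	assert_eq!(digest_coverage(4, 3, 64), 1..=64); // picks up the change at block #63
	assert_eq!(digest_coverage(4, 2, 80), 65..=80); // picks up the change at block #79
}

The skewed digest at #91 handles the tail: the configuration ends before the next level-3 boundary at 128, so block 91 gets an irregular digest pointing at the digest blocks accumulated since #64, here the level-2 digest at #80, which is why the test stores vec![80] under DigestIndex { block: 91 }.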
@@ -140,7 +137,6 @@ impl DigestIndex { } } - impl Encode for DigestIndex { fn encode_to(&self, dest: &mut W) { dest.push_byte(2); diff --git a/substrate/primitives/state-machine/src/changes_trie/mod.rs b/substrate/primitives/state-machine/src/changes_trie/mod.rs index 105f3d7de6d393f401372b308704eec6ee012421..7fedff1f1e2b93ddf887206ab52727eb112f8e80 100644 --- a/substrate/primitives/state-machine/src/changes_trie/mod.rs +++ b/substrate/primitives/state-machine/src/changes_trie/mod.rs @@ -58,63 +58,86 @@ mod prune; mod storage; mod surface_iterator; -pub use self::build_cache::{BuildCache, CachedBuildData, CacheAction}; -pub use self::storage::InMemoryStorage; -pub use self::changes_iterator::{ - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, +pub use self::{ + build_cache::{BuildCache, CacheAction, CachedBuildData}, + changes_iterator::{ + key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, + }, + prune::prune, + storage::InMemoryStorage, }; -pub use self::prune::prune; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; -use hash_db::{Hasher, Prefix}; -use num_traits::{One, Zero}; -use codec::{Decode, Encode}; -use sp_core; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::{MemoryDB, DBValue, TrieMut}; -use sp_trie::trie_types::TrieDBMut; use crate::{ - StorageKey, backend::Backend, - overlayed_changes::OverlayedChanges, changes_trie::{ build::prepare_input, - build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}, + build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, }, + overlayed_changes::OverlayedChanges, + StorageKey, +}; +use codec::{Decode, Encode}; +use hash_db::{Hasher, Prefix}; +use num_traits::{One, Zero}; +use sp_core::{self, storage::PrefixedStorageKey}; +use sp_trie::{trie_types::TrieDBMut, DBValue, MemoryDB, TrieMut}; +use std::{ + collections::{HashMap, HashSet}, + convert::TryInto, }; /// Requirements for block number that can be used with changes tries. pub trait BlockNumber: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode -{} - -impl BlockNumber for T where T: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode, -{} + Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} + +impl BlockNumber for T where + T: Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} /// Block identifier that could be used to determine fork of this block. 
#[derive(Debug)] @@ -143,7 +166,11 @@ pub trait RootsStorage: Send + Sync { fn build_anchor(&self, hash: H::Out) -> Result, String>; /// Get changes trie root for the block with given number which is an ancestor (or the block /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root(&self, anchor: &AnchorBlockId, block: Number) -> Result, String>; + fn root( + &self, + anchor: &AnchorBlockId, + block: Number, + ) -> Result, String>; } /// Changes trie storage. Provides access to trie roots and trie nodes. @@ -162,9 +189,13 @@ pub trait Storage: RootsStorage { } /// Changes trie storage -> trie backend essence adapter. -pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); +pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( + pub &'a dyn Storage, +); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage + for TrieBackendStorageAdapter<'a, H, N> +{ type Overlay = sp_trie::MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -188,26 +219,14 @@ pub struct ConfigurationRange<'a, N> { impl<'a, H, Number> State<'a, H, Number> { /// Create state with given config and storage. - pub fn new( - config: Configuration, - zero: Number, - storage: &'a dyn Storage, - ) -> Self { - Self { - config, - zero, - storage, - } + pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { + Self { config, zero, storage } } } impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { fn clone(&self) -> Self { - State { - config: self.config.clone(), - zero: self.zero.clone(), - storage: self.storage, - } + State { config: self.config.clone(), zero: self.zero.clone(), storage: self.storage } } } @@ -227,20 +246,24 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( parent_hash: H::Out, panic_on_storage_error: bool, ) -> Result, H::Out, CacheAction)>, ()> - where - H::Out: Ord + 'static + Encode, +where + H::Out: Ord + 'static + Encode, { /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. fn maybe_panic( res: std::result::Result, panic: bool, ) -> std::result::Result { - res.map(Ok) - .unwrap_or_else(|e| if panic { - panic!("changes trie: storage access is not allowed to fail within runtime: {:?}", e) + res.map(Ok).unwrap_or_else(|e| { + if panic { + panic!( + "changes trie: storage access is not allowed to fail within runtime: {:?}", + e + ) } else { Err(()) - }) + } + }) } // when storage isn't provided, changes tries aren't created @@ -255,11 +278,12 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( // prepare configuration range - we already know zero block. 
Current block may be the end block if configuration // has been changed in this block - let is_config_changed = match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; + let is_config_changed = + match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { + Some(Some(new_config)) => new_config != &state.config.encode()[..], + Some(None) => true, + None => false, + }; let config_range = ConfigurationRange { config: &state.config, zero: state.zero.clone(), @@ -303,10 +327,8 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; } - cache_action = cache_action.insert( - Some(child_index.storage_key.clone()), - storage_changed_keys, - ); + cache_action = + cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); } if not_empty { child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); @@ -331,10 +353,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; } - cache_action = cache_action.insert( - None, - storage_changed_keys, - ); + cache_action = cache_action.insert(None, storage_changed_keys); } let cache_action = cache_action.complete(block, &root); @@ -350,20 +369,21 @@ fn prepare_cached_build_data( // because it'll never be used again for building other tries // => let's clear the cache if !config.config.is_digest_build_enabled() { - return IncompleteCacheAction::Clear; + return IncompleteCacheAction::Clear } // when this is the last block where current configuration is active // => let's clear the cache if config.end.as_ref() == Some(&block) { - return IncompleteCacheAction::Clear; + return IncompleteCacheAction::Clear } // we do not need to cache anything when top-level digest trie is created, because // it'll never be used again for building other tries // => let's clear the cache match config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) if digest_level == config.config.digest_levels => IncompleteCacheAction::Clear, + Some((digest_level, _, _)) if digest_level == config.config.digest_levels => + IncompleteCacheAction::Clear, _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), } } @@ -399,6 +419,9 @@ mod tests { fn cache_is_cleared_when_end_block_of_configuration_is_built() { let config = Configuration { digest_interval: 8, digest_levels: 2 }; let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32), IncompleteCacheAction::Clear); + assert_eq!( + prepare_cached_build_data(config_range.clone(), 4u32), + IncompleteCacheAction::Clear + ); } } diff --git a/substrate/primitives/state-machine/src/changes_trie/prune.rs b/substrate/primitives/state-machine/src/changes_trie/prune.rs index 754e3893f966f5895dcc5fa3851cad8998dd5201..2ca540562b47fc138dc2c236dd0865f9f7b998eb 100644 --- a/substrate/primitives/state-machine/src/changes_trie/prune.rs +++ b/substrate/primitives/state-machine/src/changes_trie/prune.rs @@ -17,16 +17,20 @@ //! Changes trie pruning-related functions. 
+use crate::{ + changes_trie::{ + input::{ChildIndex, InputKey}, + storage::TrieBackendAdapter, + AnchorBlockId, BlockNumber, Storage, + }, + proving_backend::ProvingBackendRecorder, + trie_backend_essence::TrieBackendEssence, +}; +use codec::{Codec, Decode}; use hash_db::Hasher; -use sp_trie::Recorder; use log::warn; use num_traits::One; -use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; -use crate::changes_trie::storage::TrieBackendAdapter; -use crate::changes_trie::input::{ChildIndex, InputKey}; -use codec::{Decode, Codec}; +use sp_trie::Recorder; /// Prune obsolete changes tries. Pruning happens at the same block, where highest /// level digest is created. Pruning guarantees to save changes tries for last @@ -38,12 +42,14 @@ pub fn prune( last: Number, current_block: &AnchorBlockId, mut remove_trie_node: F, -) where H::Out: Codec { +) where + H::Out: Codec, +{ // delete changes trie for every block in range let mut block = first; loop { if block >= last.clone() + One::one() { - break; + break } let prev_block = block.clone(); @@ -56,7 +62,7 @@ pub fn prune( Err(error) => { // try to delete other tries warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); - continue; + continue }, }; let children_roots = { @@ -91,8 +97,9 @@ fn prune_trie( storage: &dyn Storage, root: H::Out, remove_trie_node: &mut F, -) where H::Out: Codec { - +) where + H::Out: Codec, +{ // enumerate all changes trie' keys, recording all nodes that have been 'touched' // (effectively - all changes trie nodes) let mut proof_recorder: Recorder = Default::default(); @@ -113,14 +120,13 @@ fn prune_trie( #[cfg(test)] mod tests { - use std::collections::HashSet; - use sp_trie::MemoryDB; - use sp_core::H256; - use crate::backend::insert_into_memory_db; - use crate::changes_trie::storage::InMemoryStorage; + use super::*; + use crate::{backend::insert_into_memory_db, changes_trie::storage::InMemoryStorage}; use codec::Encode; + use sp_core::H256; use sp_runtime::traits::BlakeTwo256; - use super::*; + use sp_trie::MemoryDB; + use std::collections::HashSet; fn prune_by_collect( storage: &dyn Storage, @@ -130,8 +136,9 @@ mod tests { ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(storage, first, last, &anchor, - |node| { pruned_trie_nodes.insert(node); }); + prune(storage, first, last, &anchor, |node| { + pruned_trie_nodes.insert(node); + }); pruned_trie_nodes } @@ -139,28 +146,36 @@ mod tests { fn prune_works() { fn prepare_storage() -> InMemoryStorage { let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); - let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode(); + let child_key = + ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() } + .encode(); let mut mdb1 = MemoryDB::::default(); - let root1 = insert_into_memory_db::( - &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); + let root1 = + insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]) + .unwrap(); let mut mdb2 = MemoryDB::::default(); let root2 = insert_into_memory_db::( &mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])], - ).unwrap(); + ) + .unwrap(); let mut mdb3 = MemoryDB::::default(); - let ch_root3 = insert_into_memory_db::( - &mut mdb3, vec![(vec![110], vec![120])]).unwrap(); - let root3 = insert_into_memory_db::(&mut mdb3, 
vec![ - (vec![13], vec![23]), - (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), - ]).unwrap(); + let ch_root3 = + insert_into_memory_db::(&mut mdb3, vec![(vec![110], vec![120])]) + .unwrap(); + let root3 = insert_into_memory_db::( + &mut mdb3, + vec![ + (vec![13], vec![23]), + (vec![14], vec![24]), + (child_key, ch_root3.as_ref().encode()), + ], + ) + .unwrap(); let mut mdb4 = MemoryDB::::default(); - let root4 = insert_into_memory_db::( - &mut mdb4, - vec![(vec![15], vec![25])], - ).unwrap(); + let root4 = + insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]) + .unwrap(); let storage = InMemoryStorage::new(); storage.insert(65, root1, mdb1); storage.insert(66, root2, mdb2); diff --git a/substrate/primitives/state-machine/src/changes_trie/storage.rs b/substrate/primitives/state-machine/src/changes_trie/storage.rs index e08fe36126c7b69874ba84a7024dce3e9f672e71..bd5e3a32b565782361d8a5a9e7d4f1e04f711924 100644 --- a/substrate/primitives/state-machine/src/changes_trie/storage.rs +++ b/substrate/primitives/state-machine/src/changes_trie/storage.rs @@ -17,22 +17,21 @@ //! Changes trie storage utilities. -use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::DBValue; -use sp_trie::MemoryDB; -use parking_lot::RwLock; use crate::{ - StorageKey, + changes_trie::{AnchorBlockId, BlockNumber, BuildCache, RootsStorage, Storage}, trie_backend_essence::TrieBackendStorage, - changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, + StorageKey, }; +use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use parking_lot::RwLock; +use sp_core::storage::PrefixedStorageKey; +use sp_trie::{DBValue, MemoryDB}; +use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] -use crate::changes_trie::input::{InputPair, ChildIndex}; +use crate::changes_trie::input::{ChildIndex, InputPair}; /// In-memory implementation of changes trie storage. pub struct InMemoryStorage { @@ -55,10 +54,7 @@ impl InMemoryStorage { /// Creates storage from given in-memory database. 
pub fn with_db(mdb: MemoryDB) -> Self { Self { - data: RwLock::new(InMemoryStorageData { - roots: BTreeMap::new(), - mdb, - }), + data: RwLock::new(InMemoryStorageData { roots: BTreeMap::new(), mdb }), cache: BuildCache::new(), } } @@ -72,7 +68,7 @@ impl InMemoryStorage { pub fn with_proof(proof: Vec>) -> Self { use hash_db::HashDB; - let mut proof_db = MemoryDB::::default(); + let mut proof_db = MemoryDB::::default(); for item in proof { proof_db.insert(EMPTY_PREFIX, &item); } @@ -104,7 +100,8 @@ impl InMemoryStorage { let mut roots = BTreeMap::new(); for (storage_key, child_input) in children_inputs { for (block, pairs) in child_input { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); + let root = + insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); if let Some(root) = root { let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { @@ -129,17 +126,14 @@ impl InMemoryStorage { } InMemoryStorage { - data: RwLock::new(InMemoryStorageData { - roots, - mdb, - }), + data: RwLock::new(InMemoryStorageData { roots, mdb }), cache: BuildCache::new(), } } #[cfg(test)] pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct + self.data.write().mdb = MemoryDB::default(); // use new to be more correct } #[cfg(test)] @@ -165,13 +159,20 @@ impl InMemoryStorage { impl RootsStorage for InMemoryStorage { fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { - self.data.read().roots.iter() + self.data + .read() + .roots + .iter() .find(|(_, v)| **v == parent_hash) .map(|(k, _)| AnchorBlockId { hash: parent_hash, number: k.clone() }) .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) } - fn root(&self, _anchor_block: &AnchorBlockId, block: Number) -> Result, String> { + fn root( + &self, + _anchor_block: &AnchorBlockId, + block: Number, + ) -> Result, String> { Ok(self.data.read().roots.get(&block).cloned()) } } @@ -201,9 +202,9 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, +where + Number: BlockNumber, + H: Hasher, { type Overlay = MemoryDB; diff --git a/substrate/primitives/state-machine/src/changes_trie/surface_iterator.rs b/substrate/primitives/state-machine/src/changes_trie/surface_iterator.rs index 13da8511f3f968b325458654f818837c60013edf..509c02ee379ff96da31e769b03d9d211c9f1bfa2 100644 --- a/substrate/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ b/substrate/primitives/state-machine/src/changes_trie/surface_iterator.rs @@ -21,8 +21,8 @@ //! of points at the terrain (mountains and valleys) inside this range that have to be drilled down to //! search for gems. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::One; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns surface iterator for given range of blocks. 
/// @@ -34,12 +34,8 @@ pub fn surface_iterator<'a, Number: BlockNumber>( begin: Number, end: Number, ) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( - config.clone(), - max.clone(), - begin.clone(), - end, - )?; + let (current, current_begin, digest_step, digest_level) = + lower_bound_max_digest(config.clone(), max.clone(), begin.clone(), end)?; Ok(SurfaceIterator { config, begin, @@ -89,7 +85,8 @@ impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { self.begin.clone(), next, ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval { + let (current, current_begin, digest_step, digest_level) = match max_digest_interval + { Err(err) => return Some(Err(err)), Ok(range) => range, }; @@ -114,14 +111,21 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( end: Number, ) -> Result<(Number, Number, u32, Option), String> { if end > max || begin > end { - return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); + return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)) } - if begin <= config.zero || config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) { - return Err(format!("changes trie range is not covered by configuration: {}..{}/{}..{}", - begin, end, config.zero, match config.end.as_ref() { + if begin <= config.zero || + config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) + { + return Err(format!( + "changes trie range is not covered by configuration: {}..{}/{}..{}", + begin, + end, + config.zero, + match config.end.as_ref() { Some(config_end) => format!("{}", config_end), None => "None".into(), - })); + } + )) } let mut digest_level = 0u32; @@ -135,10 +139,16 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( let new_digest_level = digest_level + 1; let new_digest_step = digest_step * config.config.digest_interval; let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { 1 } else { digest_interval } + if digest_interval == 0 { + 1 + } else { + digest_interval + } }; - let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) - / new_digest_interval.into()) * new_digest_interval.into(); + let new_digest_begin = config.zero.clone() + + ((current.clone() - One::one() - config.zero.clone()) / + new_digest_interval.into()) * + new_digest_interval.into(); let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); let new_current = new_digest_begin.clone() + new_digest_interval.into(); @@ -150,16 +160,20 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( skewed_digest_end.clone(), ); if let Some(skewed_digest_start) = skewed_digest_start { - let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()) - .try_into().ok() - .expect("skewed digest range is always <= max level digest range;\ - max level digest range always fits u32; qed"); + let skewed_digest_range = (skewed_digest_end.clone() - + skewed_digest_start.clone()) + .try_into() + .ok() + .expect( + "skewed digest range is always <= max level digest range;\ + max level digest range always fits u32; qed", + ); return Ok(( skewed_digest_end.clone(), skewed_digest_start, skewed_digest_range, None, - )); + )) } } } @@ -169,7 +183,7 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( if begin < new_digest_begin { current_begin = new_digest_begin; } - break; + break } // we can (and will) use this digest @@ -181,30 +195,24 @@ fn 
lower_bound_max_digest<'a, Number: BlockNumber>( // if current digest covers the whole range => no need to use next level digest if current_begin <= begin && new_digest_end >= end { - break; + break } } } - Ok(( - current, - current_begin, - digest_step, - Some(digest_level), - )) + Ok((current, current_begin, digest_step, Some(digest_level))) } #[cfg(test)] mod tests { - use crate::changes_trie::{Configuration}; use super::*; + use crate::changes_trie::Configuration; - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { config, zero, end: None } } #[test] @@ -213,13 +221,15 @@ mod tests { // when config activates at 0 assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64) + .unwrap(), (192, 176, 16, Some(2)), ); // when config activates at 30 assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64) + .unwrap(), (222, 206, 16, Some(2)), ); } @@ -230,40 +240,61 @@ mod tests { // when config activates at 0 assert_eq!( - surface_iterator( - configuration_range(&config, 0u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64,) + .unwrap() + .collect::>(), vec![ - Ok((192, Some(2))), Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), - Ok((128, Some(2))), Ok((112, Some(2))), Ok((96, Some(2))), Ok((80, Some(2))), - Ok((64, Some(2))), Ok((48, Some(2))), + Ok((192, Some(2))), + Ok((176, Some(2))), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); // when config activates at 30 assert_eq!( - surface_iterator( - configuration_range(&config, 30u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64,) + .unwrap() + .collect::>(), vec![ - Ok((190, Some(2))), Ok((174, Some(2))), Ok((158, Some(2))), Ok((142, Some(2))), Ok((126, Some(2))), - Ok((110, Some(2))), Ok((94, Some(2))), Ok((78, Some(2))), Ok((62, Some(2))), Ok((46, Some(2))), + Ok((190, Some(2))), + Ok((174, Some(2))), + Ok((158, Some(2))), + Ok((142, Some(2))), + Ok((126, Some(2))), + Ok((110, Some(2))), + Ok((94, Some(2))), + Ok((78, Some(2))), + Ok((62, Some(2))), + Ok((46, Some(2))), ], ); // when config activates at 0 AND max block is before next digest assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64) + .unwrap() + .collect::>(), vec![ - Ok((183, Some(0))), Ok((182, Some(0))), Ok((181, Some(0))), Ok((180, Some(1))), - Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), + Ok((183, Some(0))), + Ok((182, Some(0))), + Ok((181, Some(0))), + Ok((180, Some(1))), + Ok((176, Some(2))), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + 
Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); } @@ -276,10 +307,19 @@ mod tests { // when config activates at 0 AND ends at 170 config_range.end = Some(170); assert_eq!( - surface_iterator(config_range, 100_000u64, 40u64, 170u64).unwrap().collect::>(), + surface_iterator(config_range, 100_000u64, 40u64, 170u64) + .unwrap() + .collect::>(), vec![ - Ok((170, None)), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), + Ok((170, None)), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), ], ); } diff --git a/substrate/primitives/state-machine/src/error.rs b/substrate/primitives/state-machine/src/error.rs index 2705e4623a7847c03e97541adf1ab30a1c73fbe9..acc5b6080c7a3aafc3f2156bd0c44380170f818f 100644 --- a/substrate/primitives/state-machine/src/error.rs +++ b/substrate/primitives/state-machine/src/error.rs @@ -16,7 +16,6 @@ // limitations under the License. /// State Machine Errors - use sp_std::fmt; /// State Machine Error bound. diff --git a/substrate/primitives/state-machine/src/ext.rs b/substrate/primitives/state-machine/src/ext.rs index d7d65b905f49c0550f4f23b8136ab14e1cf0a0dc..cf7cbd413b1f08a85796971f6f6f771831f2b9cc 100644 --- a/substrate/primitives/state-machine/src/ext.rs +++ b/substrate/primitives/state-machine/src/ext.rs @@ -18,25 +18,28 @@ //! Concrete externalities implementation. use crate::{ - StorageKey, StorageValue, OverlayedChanges, IndexOperation, - backend::Backend, overlayed_changes::OverlayedExtensions, + backend::Backend, overlayed_changes::OverlayedExtensions, IndexOperation, OverlayedChanges, + StorageKey, StorageValue, }; +use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay, + storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, }; -use sp_trie::{trie_types::Layout, empty_child_trie_root}; -use sp_externalities::{ - Externalities, Extensions, Extension, ExtensionStore, -}; -use codec::{Decode, Encode, EncodeAppend}; +use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; +use sp_trie::{empty_child_trie_root, trie_types::Layout}; -use sp_std::{fmt, any::{Any, TypeId}, vec::Vec, vec, boxed::Box, cmp::Ordering}; -use crate::{warn, trace, log_error}; #[cfg(feature = "std")] use crate::changes_trie::State as ChangesTrieState; -use crate::StorageTransactionCache; +use crate::{log_error, trace, warn, StorageTransactionCache}; +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + cmp::Ordering, + fmt, vec, + vec::Vec, +}; #[cfg(feature = "std")] use std::error; @@ -46,7 +49,6 @@ const BENCHMARKING_FN: &str = "\ For that reason client started transactions before calling into runtime are not allowed. Without client transactions the loop condition garantuees the success of the tx close."; - #[cfg(feature = "std")] fn guard() -> sp_panic_handler::AbortGuard { sp_panic_handler::AbortGuard::force_abort() @@ -91,10 +93,10 @@ impl error::Error for Error { /// Wraps a read-only backend, call executor, and current overlayed changes. 
pub struct Ext<'a, H, N, B> - where - H: Hasher, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, +where + H: Hasher, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, @@ -114,12 +116,11 @@ pub struct Ext<'a, H, N, B> extensions: Option>, } - impl<'a, H, N, B> Ext<'a, H, N, B> - where - H: Hasher, - B: Backend, - N: crate::changes_trie::BlockNumber, +where + H: Hasher, + B: Backend, + N: crate::changes_trie::BlockNumber, { /// Create a new `Ext`. #[cfg(not(feature = "std"))] @@ -128,13 +129,7 @@ impl<'a, H, N, B> Ext<'a, H, N, B> storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, ) -> Self { - Ext { - overlay, - backend, - id: 0, - storage_transaction_cache, - _phantom: Default::default(), - } + Ext { overlay, backend, id: 0, storage_transaction_cache, _phantom: Default::default() } } /// Create a new `Ext` from overlayed changes and read-only backend @@ -176,7 +171,9 @@ where pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; - self.backend.pairs().iter() + self.backend + .pairs() + .iter() .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) .chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned()))) .collect::>() @@ -199,8 +196,11 @@ where fn storage(&self, key: &[u8]) -> Option { let _guard = guard(); - let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| - self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); // NOTE: be careful about touching the key names – used outside substrate! 
trace!( @@ -222,7 +222,8 @@ where fn storage_hash(&self, key: &[u8]) -> Option> { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .storage(key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); @@ -235,19 +236,15 @@ where result.map(|r| r.encode()) } - fn child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { + fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) - .unwrap_or_else(|| - self.backend.child_storage(child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); + .unwrap_or_else(|| { + self.backend.child_storage(child_info, key).expect(EXT_NOT_ALLOWED_TO_FAIL) + }); trace!(target: "state", "{:04x}: GetChild({}) {}={:?}", self.id, @@ -259,19 +256,15 @@ where result } - fn child_storage_hash( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { let _guard = guard(); - let result = self.overlay + let result = self + .overlay .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) - .unwrap_or_else(|| - self.backend.child_storage_hash(child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); + .unwrap_or_else(|| { + self.backend.child_storage_hash(child_info, key).expect(EXT_NOT_ALLOWED_TO_FAIL) + }); trace!(target: "state", "{:04x}: ChildHash({}) {}={:?}", self.id, @@ -299,16 +292,13 @@ where result } - fn exists_child_storage( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> bool { + fn exists_child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> bool { let _guard = guard(); let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), - _ => self.backend + _ => self + .backend .exists_child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; @@ -323,7 +313,8 @@ where } fn next_storage_key(&self, key: &[u8]) -> Option { - let mut next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); + let mut next_backend_key = + self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); let mut overlay_changes = self.overlay.iter_after(key).peekable(); match (&next_backend_key, overlay_changes.peek()) { @@ -343,9 +334,10 @@ where // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten // this key. 
- next_backend_key = self.backend.next_storage_key( - &overlay_key.0, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + next_backend_key = self + .backend + .next_storage_key(&overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } } @@ -358,18 +350,13 @@ where } } - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Option { - let mut next_backend_key = self.backend + fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option { + let mut next_backend_key = self + .backend .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); - let mut overlay_changes = self.overlay.child_iter_after( - child_info.storage_key(), - key - ).peekable(); + let mut overlay_changes = + self.overlay.child_iter_after(child_info.storage_key(), key).peekable(); match (&next_backend_key, overlay_changes.peek()) { (_, None) => next_backend_key, @@ -388,10 +375,10 @@ where // If the `backend_key` and `overlay_key` are equal, it means that we need // to search for the next backend key, because the overlay has overwritten // this key. - next_backend_key = self.backend.next_child_storage_key( - child_info, - &overlay_key.0, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + next_backend_key = self + .backend + .next_child_storage_key(child_info, &overlay_key.0) + .expect(EXT_NOT_ALLOWED_TO_FAIL); } } @@ -408,7 +395,7 @@ where let _guard = guard(); if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to directly set child storage key"); - return; + return } // NOTE: be careful about touching the key names – used outside substrate! @@ -448,11 +435,7 @@ where self.overlay.set_child_storage(child_info, key, value); } - fn kill_child_storage( - &mut self, - child_info: &ChildInfo, - limit: Option, - ) -> (bool, u32) { + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32) { trace!(target: "state", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), @@ -472,7 +455,7 @@ where if sp_core::storage::well_known_keys::starts_with_child_storage_key(prefix) { warn!(target: "trie", "Refuse to directly clear prefix that is part or contains of child storage key"); - return (false, 0); + return (false, 0) } self.mark_dirty(); @@ -498,11 +481,7 @@ where self.limit_remove_from_backend(Some(child_info), Some(prefix), limit) } - fn storage_append( - &mut self, - key: Vec, - value: Vec, - ) { + fn storage_append(&mut self, key: Vec, value: Vec) { trace!(target: "state", "{:04x}: Append {}={}", self.id, HexDisplay::from(&key), @@ -513,10 +492,9 @@ where self.mark_dirty(); let backend = &mut self.backend; - let current_value = self.overlay.value_mut_or_insert_with( - &key, - || backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() - ); + let current_value = self.overlay.value_mut_or_insert_with(&key, || { + backend.storage(&key).expect(EXT_NOT_ALLOWED_TO_FAIL).unwrap_or_default() + }); StorageAppend::new(current_value).append(value); } @@ -527,7 +505,7 @@ where self.id, HexDisplay::from(&root.as_ref()), ); - return root.encode(); + return root.encode() } let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); @@ -535,10 +513,7 @@ where root.encode() } - fn child_storage_root( - &mut self, - child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -546,9 +521,7 @@ where let root = self 
.storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else( - || empty_child_trie_root::>() - ); + .unwrap_or_else(|| empty_child_trie_root::>()); trace!(target: "state", "{:04x}: ChildRoot({})(cached) {}", self.id, HexDisplay::from(&storage_key), @@ -587,9 +560,7 @@ where let root = self .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else( - || empty_child_trie_root::>() - ); + .unwrap_or_else(|| empty_child_trie_root::>()); trace!(target: "state", "{:04x}: ChildRoot({})(no_change) {}", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -625,10 +596,8 @@ where index, HexDisplay::from(&hash), ); - self.overlay.add_transaction_index(IndexOperation::Renew { - extrinsic: index, - hash: hash.to_vec(), - }); + self.overlay + .add_transaction_index(IndexOperation::Renew { extrinsic: index, hash: hash.to_vec() }); } #[cfg(not(feature = "std"))] @@ -639,7 +608,8 @@ where #[cfg(feature = "std")] fn storage_changes_root(&mut self, mut parent_hash: &[u8]) -> Result>, ()> { let _guard = guard(); - if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root { + if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root + { trace!( target: "state", "{:04x}: ChangesRoot({})(cached) {:?}", @@ -653,13 +623,13 @@ where let root = self.overlay.changes_trie_root( self.backend, self.changes_trie_state.as_ref(), - Decode::decode(&mut parent_hash).map_err(|e| + Decode::decode(&mut parent_hash).map_err(|e| { trace!( target: "state", "Failed to decode changes root parent hash: {}", e, ) - )?, + })?, true, self.storage_transaction_cache, ); @@ -693,13 +663,15 @@ where for _ in 0..self.overlay.transaction_depth() { self.overlay.rollback_transaction().expect(BENCHMARKING_FN); } - self.overlay.drain_storage_changes( - self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + self.overlay + .drain_storage_changes( + self.backend, + #[cfg(feature = "std")] + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay @@ -711,19 +683,24 @@ where for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } - let changes = self.overlay.drain_storage_changes( - self.backend, - #[cfg(feature = "std")] - None, - Default::default(), - self.storage_transaction_cache, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); - self.backend.commit( - changes.transaction_storage_root, - changes.transaction, - changes.main_storage_changes, - changes.child_storage_changes, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); + let changes = self + .overlay + .drain_storage_changes( + self.backend, + #[cfg(feature = "std")] + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.backend + .commit( + changes.transaction_storage_root, + changes.transaction, + changes.main_storage_changes, + changes.child_storage_changes, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); self.overlay .enter_runtime() @@ -775,13 +752,13 @@ where self.backend.apply_to_keys_while(child_info, prefix, |key| { if num_deleted == limit { all_deleted = false; - return false; + return false } if let Some(num) = num_deleted.checked_add(1) { num_deleted = num; } else { all_deleted = false; - return false; + return 
false } if let Some(child_info) = child_info { self.overlay.set_child_storage(child_info, key.to_vec(), None); @@ -840,7 +817,7 @@ impl<'a> StorageAppend<'a> { "Failed to append value, resetting storage item to `[value]`.", ); value.encode() - } + }, }; } } @@ -896,7 +873,10 @@ where } } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if let Some(ref mut extensions) = self.extensions { if extensions.deregister(type_id) { Ok(()) @@ -912,24 +892,19 @@ where #[cfg(test)] mod tests { use super::*; + use crate::{ + changes_trie::{ + Configuration as ChangesTrieConfiguration, InMemoryStorage as TestChangesTrieStorage, + }, + InMemoryBackend, + }; + use codec::Encode; use hex_literal::hex; use num_traits::Zero; - use codec::Encode; use sp_core::{ - H256, - Blake2Hasher, map, - storage::{ - Storage, - StorageChild, - well_known_keys::EXTRINSIC_INDEX, - }, - }; - use crate::{ - changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as TestChangesTrieStorage, - }, InMemoryBackend, + storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, + Blake2Hasher, H256, }; type TestBackend = InMemoryBackend; @@ -947,10 +922,7 @@ mod tests { } fn changes_trie_config() -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - } + ChangesTrieConfiguration { digest_interval: 0, digest_levels: 0 } } #[test] @@ -1013,8 +985,9 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children_default: map![] - }.into(); + children_default: map![], + } + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1056,8 +1029,9 @@ mod tests { top: map![ vec![30] => vec![30] ], - children_default: map![] - }.into(); + children_default: map![], + } + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1087,7 +1061,8 @@ mod tests { child_info: child_info.to_owned(), } ], - }.into(); + } + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1131,7 +1106,8 @@ mod tests { child_info: child_info.to_owned(), } ], - }.into(); + } + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1142,10 +1118,7 @@ mod tests { ); assert_eq!(ext.child_storage(child_info, &[20]), None); - assert_eq!( - ext.child_storage_hash(child_info, &[20]), - None, - ); + assert_eq!(ext.child_storage_hash(child_info, &[20]), None,); assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31])); assert_eq!( @@ -1170,7 +1143,8 @@ mod tests { child_info: child_info.to_owned(), } ], - }.into(); + } + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/substrate/primitives/state-machine/src/in_memory_backend.rs b/substrate/primitives/state-machine/src/in_memory_backend.rs index 4ee16dfd2f8a823b9558ef0eefc7849b6ff4a926..4daf1004a85fcd8c553c31d7dbbf1ddc420dea0f 100644 --- a/substrate/primitives/state-machine/src/in_memory_backend.rs +++ b/substrate/primitives/state-machine/src/in_memory_backend.rs @@ -18,13 +18,13 @@ //! State machine in memory backend. 
use crate::{ - StorageKey, StorageValue, StorageCollection, trie_backend::TrieBackend, backend::Backend, + backend::Backend, trie_backend::TrieBackend, StorageCollection, StorageKey, StorageValue, }; -use std::collections::{BTreeMap, HashMap}; -use hash_db::Hasher; -use sp_trie::{MemoryDB, empty_trie_root, Layout}; use codec::Codec; +use hash_db::Hasher; use sp_core::storage::{ChildInfo, Storage}; +use sp_trie::{empty_trie_root, Layout, MemoryDB}; +use std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. pub fn new_in_mem() -> TrieBackend, H> @@ -40,9 +40,7 @@ where H::Out: Codec + Ord, { /// Copy the state, with applied updates - pub fn update< - T: IntoIterator, StorageCollection)> - >( + pub fn update, StorageCollection)>>( &self, changes: T, ) -> Self { @@ -52,19 +50,16 @@ where } /// Insert values into backend trie. - pub fn insert< - T: IntoIterator, StorageCollection)> - >( + pub fn insert, StorageCollection)>>( &mut self, changes: T, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); let (root, transaction) = self.full_storage_root( top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), - child.iter() - .filter_map(|v| - v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) - ), + child.iter().filter_map(|v| { + v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) + }), ); self.apply_transaction(root, transaction); @@ -115,7 +110,9 @@ where fn from(inner: HashMap, BTreeMap>) -> Self { let mut backend = new_in_mem(); backend.insert( - inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + inner + .into_iter() + .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), ); backend } @@ -126,8 +123,11 @@ where H::Out: Codec + Ord, { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + let mut inner: HashMap, BTreeMap> = inners + .children_default + .into_iter() + .map(|(_k, c)| (Some(c.child_info), c.data)) + .collect(); inner.insert(None, inners.top); inner.into() } @@ -144,16 +144,13 @@ where } } -impl From, StorageCollection)>> - for TrieBackend, H> +impl From, StorageCollection)>> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from( - inner: Vec<(Option, StorageCollection)>, - ) -> Self { - let mut expanded: HashMap, BTreeMap> - = HashMap::new(); + fn from(inner: Vec<(Option, StorageCollection)>) -> Self { + let mut expanded: HashMap, BTreeMap> = + HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); for (key, value) in key_values { @@ -169,8 +166,8 @@ where #[cfg(test)] mod tests { use super::*; - use sp_runtime::traits::BlakeTwo256; use crate::backend::Backend; + use sp_runtime::traits::BlakeTwo256; /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] @@ -178,15 +175,10 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let mut storage = storage.update( - vec![( - Some(child_info.clone()), - vec![(b"2".to_vec(), Some(b"3".to_vec()))] - )] - ); + let mut storage = storage + .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), - Some(b"3".to_vec())); + assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } @@ -196,8 +188,10 @@ mod tests { let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); - storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); - storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); + storage + .insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + storage + .insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); diff --git a/substrate/primitives/state-machine/src/lib.rs b/substrate/primitives/state-machine/src/lib.rs index bc5b48f02db4e49bac6fbf42f550d973fef285b1..e2162df5cfd195c5a571f24771013ea19fac11cc 100644 --- a/substrate/primitives/state-machine/src/lib.rs +++ b/substrate/primitives/state-machine/src/lib.rs @@ -22,23 +22,23 @@ pub mod backend; #[cfg(feature = "std")] -mod in_memory_backend; +mod basic; #[cfg(feature = "std")] mod changes_trie; mod error; mod ext; #[cfg(feature = "std")] -mod testing; -#[cfg(feature = "std")] -mod basic; +mod in_memory_backend; pub(crate) mod overlayed_changes; #[cfg(feature = "std")] mod proving_backend; -mod trie_backend; -mod trie_backend_essence; -mod stats; #[cfg(feature = "std")] mod read_only; +mod stats; +#[cfg(feature = "std")] +mod testing; +mod trie_backend; +mod trie_backend_essence; #[cfg(feature = "std")] pub use std_reexport::*; @@ -46,7 +46,7 @@ pub use std_reexport::*; #[cfg(feature = "std")] pub use execution::*; #[cfg(feature = "std")] -pub use log::{debug, warn, error as log_error}; +pub use log::{debug, error as log_error, warn}; #[cfg(feature = "std")] pub use tracing::trace; @@ -55,12 +55,12 @@ pub use tracing::trace; #[cfg(not(feature = "std"))] #[macro_export] macro_rules! warn { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -68,12 +68,12 @@ macro_rules! warn { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -81,12 +81,12 @@ macro_rules! debug { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! 
trace { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// In no_std we skip logs for state_machine, this macro @@ -94,12 +94,12 @@ macro_rules! trace { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! log_error { - (target: $target:expr, $($arg:tt)+) => ( + (target: $target:expr, $($arg:tt)+) => { () - ); - ($($arg:tt)+) => ( + }; + ($($arg:tt)+) => { () - ); + }; } /// Default error type to use with state machine trie backend. @@ -117,20 +117,19 @@ impl sp_std::fmt::Display for DefaultError { } } -pub use crate::overlayed_changes::{ - OverlayedChanges, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, - StorageChanges, StorageTransactionCache, - OffchainChangesCollection, - OffchainOverlayedChanges, - IndexOperation, +pub use crate::{ + backend::Backend, + ext::Ext, + overlayed_changes::{ + ChildStorageCollection, IndexOperation, OffchainChangesCollection, + OffchainOverlayedChanges, OverlayedChanges, StorageChanges, StorageCollection, StorageKey, + StorageTransactionCache, StorageValue, + }, + stats::{StateMachineStats, UsageInfo, UsageUnit}, + trie_backend::TrieBackend, + trie_backend_essence::{Storage, TrieBackendStorage}, }; -pub use crate::backend::Backend; -pub use crate::trie_backend_essence::{TrieBackendStorage, Storage}; -pub use crate::trie_backend::TrieBackend; -pub use crate::stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use error::{Error, ExecutionError}; -pub use crate::ext::Ext; #[cfg(not(feature = "std"))] mod changes_trie { @@ -143,45 +142,45 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; - pub use crate::testing::TestExternalities; - pub use crate::basic::BasicExternalities; - pub use crate::read_only::{ReadOnlyExternalities, InspectState}; - pub use crate::changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - State as ChangesTrieState, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - BuildCache as ChangesTrieBuildCache, - CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, - prune as prune_changes_tries, - disabled_state as disabled_changes_trie_state, - BlockNumber as ChangesTrieBlockNumber, + pub use crate::{ + basic::BasicExternalities, + changes_trie::{ + disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, + AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, + BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, + ConfigurationRange as ChangesTrieConfigurationRange, + InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, + State as ChangesTrieState, Storage as ChangesTrieStorage, + }, + error::{Error, ExecutionError}, + in_memory_backend::new_in_mem, + proving_backend::{ + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + }, + read_only::{InspectState, ReadOnlyExternalities}, + testing::TestExternalities, }; - pub use crate::proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + pub use sp_trie::{ + 
trie_types::{Layout, TrieDBMut}, + DBValue, MemoryDB, StorageProof, TrieMut, }; - pub use crate::error::{Error, ExecutionError}; - pub use crate::in_memory_backend::new_in_mem; } #[cfg(feature = "std")] mod execution { use super::*; - use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; - use log::{warn, trace}; + use codec::{Codec, Decode, Encode}; use hash_db::Hasher; - use codec::{Decode, Encode, Codec}; + use log::{trace, warn}; use sp_core::{ - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, + hexdisplay::HexDisplay, + storage::ChildInfo, traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; - + use std::{collections::HashMap, fmt, panic::UnwindSafe, result}; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. Client initiated transactions @@ -193,10 +192,8 @@ mod execution { pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Type of changes trie transaction. - pub type ChangesTrieTransaction = ( - MemoryDB, - ChangesTrieCacheAction<::Out, N>, - ); + pub type ChangesTrieTransaction = + (MemoryDB, ChangesTrieCacheAction<::Out, N>); /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; @@ -259,14 +256,14 @@ mod execution { self, ) -> ExecutionManager> { match self { - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), + ExecutionStrategy::AlwaysWasm => + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { warn!( "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, - native_result, + wasm_result, native_result, ); warn!(" Native result {:?}", native_result); warn!(" Wasm result {:?}", wasm_result); @@ -293,10 +290,10 @@ mod execution { /// The substrate state machine. 
pub struct StateMachine<'a, B, H, N, Exec> - where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, + where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, { backend: &'a B, exec: &'a Exec, @@ -310,7 +307,8 @@ mod execution { stats: StateMachineStats, } - impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where + impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> + where H: Hasher, B: Backend, N: ChangesTrieBlockNumber, @@ -320,7 +318,8 @@ mod execution { } } - impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where + impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> + where H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, @@ -383,19 +382,19 @@ mod execution { self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, - ).map(NativeOrEncoded::into_encoded) + ) + .map(NativeOrEncoded::into_encoded) } fn execute_aux( &mut self, use_native: bool, native_call: Option, - ) -> ( - CallResult, - bool, - ) where + ) -> (CallResult, bool) + where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, + NC: FnOnce() -> result::Result> + + UnwindSafe, { let mut cache = StorageTransactionCache::default(); @@ -404,7 +403,9 @@ mod execution { None => &mut cache, }; - self.overlay.enter_runtime().expect("StateMachine is never called from the runtime; qed"); + self.overlay + .enter_runtime() + .expect("StateMachine is never called from the runtime; qed"); let mut ext = Ext::new( self.overlay, @@ -432,7 +433,8 @@ mod execution { native_call, ); - self.overlay.exit_runtime() + self.overlay + .exit_runtime() .expect("Runtime is not able to call this function in the overlay; qed"); trace!( @@ -450,27 +452,25 @@ mod execution { mut native_call: Option, on_consensus_failure: Handler, ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { self.overlay.start_transaction(); let (result, was_native) = self.execute_aux(true, native_call.take()); if was_native { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); + let (wasm_result, _) = self.execute_aux(false, native_call); - if (result.is_ok() && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() + if (result.is_ok() && + wasm_result.is_ok() && result.as_ref().ok() == wasm_result.as_ref().ok()) || + result.is_err() && wasm_result.is_err() { result } else { @@ -486,25 +486,20 @@ mod execution { &mut self, mut native_call: Option, ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, { self.overlay.start_transaction(); - let (result, was_native) = self.execute_aux( - true, - native_call.take(), - ); + let (result, was_native) = self.execute_aux(true, native_call.take()); if !was_native || result.is_ok() { self.overlay.commit_transaction().expect(PROOF_CLOSE_TRANSACTION); result } else { self.overlay.rollback_transaction().expect(PROOF_CLOSE_TRANSACTION); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); + let 
(wasm_result, _) = self.execute_aux(false, native_call); wasm_result } } @@ -523,40 +518,33 @@ mod execution { manager: ExecutionManager, mut native_call: Option, ) -> Result, Box> - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result> + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result> + + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, { let changes_tries_enabled = self.changes_trie_state.is_some(); self.overlay.set_collect_extrinsics(changes_tries_enabled); let result = { match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy( - native_call.take(), - on_consensus_failure, - ) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy( - native_call.take(), - ) - }, + ExecutionManager::Both(on_consensus_failure) => self + .execute_call_with_both_strategy(native_call.take(), on_consensus_failure), + ExecutionManager::NativeElseWasm => + self.execute_call_with_native_else_wasm_strategy(native_call.take()), ExecutionManager::AlwaysWasm(trust_level) => { let _abort_guard = match trust_level { BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), + BackendTrustLevel::Untrusted => + Some(sp_panic_handler::AbortGuard::never_abort()), }; self.execute_aux(false, native_call).0 }, - ExecutionManager::NativeWhenPossible => { - self.execute_aux(true, native_call).0 - }, + ExecutionManager::NativeWhenPossible => self.execute_aux(true, native_call).0, } }; @@ -582,7 +570,8 @@ mod execution { N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_execution_on_trie_backend::<_, _, N, _, _>( trie_backend, @@ -704,14 +693,12 @@ mod execution { sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( always_untrusted_wasm(), None, - ).map(NativeOrEncoded::into_encoded) + ) + .map(NativeOrEncoded::into_encoded) } /// Generate storage read proof. - pub fn prove_read( - mut backend: B, - keys: I, - ) -> Result> + pub fn prove_read(mut backend: B, keys: I) -> Result> where B: Backend, H: Hasher, @@ -719,10 +706,9 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() - .ok_or_else( - || Box::new(ExecutionError::UnableToGenerateProof) as Box - )?; + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_read_on_trie_backend(trie_backend, keys) } @@ -739,9 +725,16 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_range_read_with_size_on_trie_backend(trie_backend, child_info, prefix, size_limit, start_at) + prove_range_read_with_size_on_trie_backend( + trie_backend, + child_info, + prefix, + size_limit, + start_at, + ) } /// Generate range storage read proof on an existing trie backend. 
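Taken together, the two halves of this API split a storage read across a trust boundary: `prove_read` records every trie node touched while reading the requested keys into a `StorageProof`, and `read_proof_check` (defined further down in this module) replays the read against nothing but the state root. A minimal sketch of the round trip, assuming the crate-internal `test_trie` fixture used by the test module below, in which the key b"value2" holds vec![24], and `BlakeTwo256` as the hasher:

use sp_runtime::traits::BlakeTwo256;

// Prover side: the full state is available; record the touched nodes.
let remote_backend = trie_backend::tests::test_trie();
let remote_root = remote_backend.storage_root(std::iter::empty()).0;
let proof = prove_read(remote_backend, &[b"value2"]).unwrap();

// Verifier side: only the root and the proof cross the wire.
let values = read_proof_check::<BlakeTwo256, _>(remote_root, proof, &[b"value2"]).unwrap();
assert_eq!(values.into_iter().collect::<Vec<_>>(), vec![(b"value2".to_vec(), Some(vec![24]))]);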
@@ -759,14 +752,22 @@ mod execution { { let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); let mut count = 0; - proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |_key, _value| { - if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { - count += 1; - true - } else { - false - } - }, false).map_err(|e| Box::new(e) as Box)?; + proving_backend + .apply_to_key_values_while( + child_info, + prefix, + start_at, + |_key, _value| { + if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + true + } else { + false + } + }, + false, + ) + .map_err(|e| Box::new(e) as Box)?; Ok((proving_backend.extract_proof(), count)) } @@ -783,7 +784,8 @@ mod execution { I: IntoIterator, I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; prove_child_read_on_trie_backend(trie_backend, child_info, keys) } @@ -923,7 +925,8 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - proving_backend.child_storage(child_info, key) + proving_backend + .child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } @@ -943,10 +946,16 @@ mod execution { H::Out: Ord + Codec, { let mut values = Vec::new(); - let result = proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |key, value| { - values.push((key.to_vec(), value.to_vec())); - count.as_ref().map_or(true, |c| (values.len() as u32) < *c) - }, true); + let result = proving_backend.apply_to_key_values_while( + child_info, + prefix, + start_at, + |key, value| { + values.push((key.to_vec(), value.to_vec())); + count.as_ref().map_or(true, |c| (values.len() as u32) < *c) + }, + true, + ); match result { Ok(completed) => Ok((values, completed)), Err(e) => Err(Box::new(e) as Box), @@ -956,23 +965,22 @@ mod execution { #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use codec::Encode; - use super::*; - use super::ext::Ext; - use super::changes_trie::Configuration as ChangesTrieConfig; + use super::{changes_trie::Configuration as ChangesTrieConfig, ext::Ext, *}; + use crate::execution::CallResult; + use codec::{Decode, Encode}; use sp_core::{ - map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, + map, + storage::ChildInfo, + testing::TaskExecutor, + traits::{CodeExecutor, Externalities, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; use sp_runtime::traits::BlakeTwo256; - use std::{result, collections::HashMap, panic::UnwindSafe}; - use codec::Decode; - use sp_core::{ - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, - traits::CodeExecutor, + use std::{ + collections::{BTreeMap, HashMap}, + panic::UnwindSafe, + result, }; - use crate::execution::CallResult; - #[derive(Clone)] struct DummyCodeExecutor { @@ -1000,12 +1008,7 @@ mod tests { if self.change_changes_trie_config { ext.place_storage( sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some( - ChangesTrieConfig { - digest_interval: 777, - digest_levels: 333, - }.encode() - ) + Some(ChangesTrieConfig { digest_interval: 777, digest_levels: 333 }.encode()), ); } @@ -1013,24 +1016,14 @@ mod tests { match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { (true, true, _, Some(call)) => { let res = sp_externalities::set_and_run_with_externalities(ext, || call()); - ( - res.map(NativeOrEncoded::Native).map_err(|_| 0), - true - ) - }, - (true, true, _, None) | (false, _, true, None) => { - ( - Ok( 
- NativeOrEncoded::Encoded( - vec![ - ext.storage(b"value1").unwrap()[0] + - ext.storage(b"value2").unwrap()[0] - ] - ) - ), - using_native - ) + (res.map(NativeOrEncoded::Native).map_err(|_| 0), true) }, + (true, true, _, None) | (false, _, true, None) => ( + Ok(NativeOrEncoded::Encoded(vec![ + ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], + ])), + using_native, + ), _ => (Err(0), using_native), } } @@ -1069,13 +1062,9 @@ mod tests { TaskExecutor::new(), ); - assert_eq!( - state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), - vec![66], - ); + assert_eq!(state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), vec![66],); } - #[test] fn execute_works_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); @@ -1126,15 +1115,15 @@ mod tests { TaskExecutor::new(), ); - assert!( - state_machine.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + assert!(state_machine + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( ExecutionManager::Both(|we, _ne| { consensus_failed = true; we }), None, - ).is_err() - ); + ) + .is_err()); assert!(consensus_failed); } @@ -1158,7 +1147,8 @@ mod tests { "test", &[], &RuntimeCode::empty(), - ).unwrap(); + ) + .unwrap(); // check proof locally let local_result = execution_proof_check::( @@ -1170,7 +1160,8 @@ mod tests { "test", &[], &RuntimeCode::empty(), - ).unwrap(); + ) + .unwrap(); // check that both results are correct assert_eq!(remote_result, vec![66]); @@ -1210,7 +1201,9 @@ mod tests { overlay.commit_transaction().unwrap(); assert_eq!( - overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())) + overlay + .changes() + .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ b"abc".to_vec() => None.into(), @@ -1238,7 +1231,9 @@ mod tests { overlay.commit_transaction().unwrap(); assert_eq!( - overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())) + overlay + .changes() + .map(|(k, v)| (k.clone(), v.value().cloned())) .collect::>(), map![ b"abb".to_vec() => None.into(), @@ -1283,7 +1278,8 @@ mod tests { } assert_eq!( - overlay.children() + overlay + .children() .flat_map(|(iter, _child_info)| iter) .map(|(k, v)| (k.clone(), v.value().clone())) .collect::>(), @@ -1345,39 +1341,15 @@ mod tests { None, ); - ext.set_child_storage( - child_info, - b"abc".to_vec(), - b"def".to_vec() - ); - assert_eq!( - ext.child_storage( - child_info, - b"abc" - ), - Some(b"def".to_vec()) - ); - ext.kill_child_storage( - child_info, - None, - ); - assert_eq!( - ext.child_storage( - child_info, - b"abc" - ), - None - ); + ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); + assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); + ext.kill_child_storage(child_info, None); + assert_eq!(ext.child_storage(child_info, b"abc"), None); } #[test] fn append_storage_works() { - let reference_data = vec![ - b"data1".to_vec(), - b"2".to_vec(), - b"D3".to_vec(), - b"d4".to_vec(), - ]; + let reference_data = vec![b"data1".to_vec(), b"2".to_vec(), b"D3".to_vec(), b"d4".to_vec()]; let key = b"key".to_vec(); let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); @@ -1393,10 +1365,7 @@ mod tests { ); ext.storage_append(key.clone(), reference_data[0].encode()); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![reference_data[0].clone()].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()),); } overlay.start_transaction(); { @@ 
-1411,10 +1380,7 @@ mod tests { for i in reference_data.iter().skip(1) { ext.storage_append(key.clone(), i.encode()); } - assert_eq!( - ext.storage(key.as_slice()), - Some(reference_data.encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(reference_data.encode()),); } overlay.rollback_transaction().unwrap(); { @@ -1425,18 +1391,18 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![reference_data[0].clone()].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode()),); } } #[test] fn remove_with_append_then_rollback_appended_then_append_again() { - #[derive(codec::Encode, codec::Decode)] - enum Item { InitializationItem, DiscardedItem, CommitedItem } + enum Item { + InitializationItem, + DiscardedItem, + CommitedItem, + } let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); @@ -1468,10 +1434,7 @@ mod tests { None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()),); ext.storage_append(key.clone(), Item::DiscardedItem.encode()); @@ -1492,10 +1455,7 @@ mod tests { None, ); - assert_eq!( - ext.storage(key.as_slice()), - Some(vec![Item::InitializationItem].encode()), - ); + assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode()),); ext.storage_append(key.clone(), Item::CommitedItem.encode()); @@ -1503,7 +1463,6 @@ mod tests { ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), ); - } overlay.start_transaction(); @@ -1524,10 +1483,12 @@ mod tests { } fn test_compact(remote_proof: StorageProof, remote_root: &sp_core::H256) -> StorageProof { - let compact_remote_proof = remote_proof.into_compact_proof::( - remote_root.clone(), - ).unwrap(); - compact_remote_proof.to_storage_proof::(Some(remote_root)).unwrap().0 + let compact_remote_proof = + remote_proof.into_compact_proof::(remote_root.clone()).unwrap(); + compact_remote_proof + .to_storage_proof::(Some(remote_root)) + .unwrap() + .0 } #[test] @@ -1539,17 +1500,13 @@ mod tests { let remote_root = remote_backend.storage_root(std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); - // check proof locally - let local_result1 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[b"value2"], - ).unwrap(); - let local_result2 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[&[0xff]], - ).is_ok(); + // check proof locally + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), &[b"value2"]) + .unwrap(); + let local_result2 = + read_proof_check::(remote_root, remote_proof.clone(), &[&[0xff]]) + .is_ok(); // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), @@ -1559,45 +1516,42 @@ mod tests { // on child trie let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let remote_proof = prove_child_read( - remote_backend, - child_info, - &[b"value3"], - ).unwrap(); + let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), child_info, &[b"value3"], - ).unwrap(); + ) + .unwrap(); 
let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), child_info, &[b"value2"], - ).unwrap(); + ) + .unwrap(); assert_eq!( local_result1.into_iter().collect::>(), vec![(b"value3".to_vec(), Some(vec![142]))], ); - assert_eq!( - local_result2.into_iter().collect::>(), - vec![(b"value2".to_vec(), None)], - ); + assert_eq!(local_result2.into_iter().collect::>(), vec![(b"value2".to_vec(), None)],); } #[test] fn prove_read_with_size_limit_works() { let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Always contains at least some nodes. assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); let remote_backend = trie_backend::tests::test_trie(); - let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); assert_eq!(count, 85); let (results, completed) = read_range_proof_check::( remote_root, proof.clone(), None, None, Some(count), None, - ).unwrap(); + ) + .unwrap(); assert_eq!(results.len() as u32, count); assert_eq!(completed, false); // When checking without a count limit, the proof may actually contain extra values. - let (results, completed) = read_range_proof_check::( - remote_root, - proof, - None, - None, - None, - None, - ).unwrap(); + let (results, completed) = + read_range_proof_check::(remote_root, proof, None, None, None, None) + .unwrap(); assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); let remote_backend = trie_backend::tests::test_trie(); - let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); assert_eq!(count, 132); let (results, completed) = read_range_proof_check::( remote_root, proof.clone(), None, None, None, None, - ).unwrap(); + ) + .unwrap(); assert_eq!(results.len() as u32, count); assert_eq!(completed, true); } @@ -1650,41 +1602,41 @@ mod tests { let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), vec![ - (&child_info1, vec![ - (&b"key1"[..], Some(&b"val2"[..])), - (&b"key2"[..], Some(&b"val3"[..])), - ].into_iter()), - (&child_info2, vec![ - (&b"key3"[..], Some(&b"val4"[..])), - (&b"key4"[..], Some(&b"val5"[..])), - ].into_iter()), - (&child_info3, vec![ - (&b"key5"[..], Some(&b"val6"[..])), - (&b"key6"[..], Some(&b"val7"[..])), - ].into_iter()), - ].into_iter(), + ( + &child_info1, + vec![(&b"key1"[..], Some(&b"val2"[..])), (&b"key2"[..], Some(&b"val3"[..]))] + .into_iter(), + ), + ( + &child_info2, + vec![(&b"key3"[..], Some(&b"val4"[..])), (&b"key4"[..], Some(&b"val5"[..]))] + .into_iter(), + ), + ( + &child_info3, + vec![(&b"key5"[..], Some(&b"val6"[..])), (&b"key6"[..], Some(&b"val7"[..]))] + .into_iter(), + ), + ] + .into_iter(), ); remote_backend.backend_storage_mut().consolidate(transaction); remote_backend.essence.set_root(remote_root.clone()); - let remote_proof = prove_child_read( - remote_backend, - &child_info1, - &[b"key1"], - ).unwrap(); + let remote_proof = 
prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), &child_info1, &[b"key1"], - ).unwrap(); + ) + .unwrap(); assert_eq!(local_result1.len(), 1); assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(b"val2".to_vec()))); } #[test] fn child_storage_uuid() { - let child_info_1 = ChildInfo::new_default(b"sub_test1"); let child_info_2 = ChildInfo::new_default(b"sub_test2"); @@ -1782,16 +1734,19 @@ mod tests { ); let run_state_machine = |state_machine: &mut StateMachine<_, _, _, _>| { - state_machine.execute_using_consensus_failure_handler:: _, _, _>( - ExecutionManager::NativeWhenPossible, - Some(|| { - sp_externalities::with_externalities(|mut ext| { - ext.register_extension(DummyExt(2)).unwrap(); - }).unwrap(); - - Ok(()) - }), - ).unwrap(); + state_machine + .execute_using_consensus_failure_handler:: _, _, _>( + ExecutionManager::NativeWhenPossible, + Some(|| { + sp_externalities::with_externalities(|mut ext| { + ext.register_extension(DummyExt(2)).unwrap(); + }) + .unwrap(); + + Ok(()) + }), + ) + .unwrap(); }; run_state_machine(&mut state_machine); diff --git a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs index ae9584990e5faba6fbd138cd0ea5b5401994820b..1ffd569e2828b783a1d850b7a7fcac9048199ae6 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -17,17 +17,19 @@ //! Houses the code that implements the transactional overlay storage. -use super::{StorageKey, StorageValue, Extrinsics}; +use super::{Extrinsics, StorageKey, StorageValue}; -#[cfg(feature = "std")] -use std::collections::HashSet as Set; #[cfg(not(feature = "std"))] use sp_std::collections::btree_set::BTreeSet as Set; +#[cfg(feature = "std")] +use std::collections::HashSet as Set; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::hash::Hash; -use smallvec::SmallVec; use crate::warn; +use smallvec::SmallVec; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + hash::Hash, +}; const PROOF_OVERLAY_NON_EMPTY: &str = "\ An OverlayValue is always created with at least one transaction and dropped as soon @@ -82,9 +84,7 @@ pub struct OverlayedEntry { impl Default for OverlayedEntry { fn default() -> Self { - Self { - transactions: SmallVec::new(), - } + Self { transactions: SmallVec::new() } } } @@ -142,7 +142,9 @@ impl OverlayedEntry { /// Unique list of extrinsic indices which modified the value. pub fn extrinsics(&self) -> BTreeSet { let mut set = BTreeSet::new(); - self.transactions.iter().for_each(|t| t.extrinsics.copy_extrinsics_into(&mut set)); + self.transactions + .iter() + .for_each(|t| t.extrinsics.copy_extrinsics_into(&mut set)); set } @@ -165,17 +167,9 @@ impl OverlayedEntry { /// /// This makes sure that the old version is not overwritten and can be properly /// rolled back when required. 
- fn set( - &mut self, - value: V, - first_write_in_tx: bool, - at_extrinsic: Option, - ) { + fn set(&mut self, value: V, first_write_in_tx: bool, at_extrinsic: Option) { if first_write_in_tx || self.transactions.is_empty() { - self.transactions.push(InnerValue { - value, - extrinsics: Default::default(), - }); + self.transactions.push(InnerValue { value, extrinsics: Default::default() }); } else { *self.value_mut() = value; } @@ -223,9 +217,9 @@ impl OverlayedMap { /// Get an optional reference to the value stored for the specified key. pub fn get(&self, key: &Q) -> Option<&OverlayedEntry> - where - K: sp_std::borrow::Borrow, - Q: Ord + ?Sized, + where + K: sp_std::borrow::Borrow, + Q: Ord + ?Sized, { self.changes.get(key) } @@ -233,24 +227,19 @@ impl OverlayedMap { /// Set a new value for the specified key. /// /// Can be rolled back or committed when called inside a transaction. - pub fn set( - &mut self, - key: K, - value: V, - at_extrinsic: Option, - ) { + pub fn set(&mut self, key: K, value: V, at_extrinsic: Option) { let overlayed = self.changes.entry(key.clone()).or_default(); overlayed.set(value, insert_dirty(&mut self.dirty_keys, key), at_extrinsic); } /// Get a list of all changes as seen by current transaction. - pub fn changes(&self) -> impl Iterator)> { + pub fn changes(&self) -> impl Iterator)> { self.changes.iter() } /// Get a list of all changes as seen by current transaction, consumes /// the overlay. - pub fn into_changes(self) -> impl Iterator)> { + pub fn into_changes(self) -> impl Iterator)> { self.changes.into_iter() } @@ -258,7 +247,7 @@ impl OverlayedMap { /// /// Panics: /// Panics if there are open transactions: `transaction_depth() > 0` - pub fn drain_commited(self) -> impl Iterator { + pub fn drain_commited(self) -> impl Iterator { assert!(self.transaction_depth() == 0, "Drain is not allowed with open transactions."); self.changes.into_iter().map(|(k, mut v)| (k, v.pop_transaction().value)) } @@ -276,7 +265,7 @@ impl OverlayedMap { /// Calling this while already inside the runtime will return an error. pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { if let ExecutionMode::Runtime = self.execution_mode { - return Err(AlreadyInRuntime); + return Err(AlreadyInRuntime) } self.execution_mode = ExecutionMode::Runtime; self.num_client_transactions = self.transaction_depth(); @@ -289,7 +278,7 @@ impl OverlayedMap { /// Calling this while already outside the runtime will return an error. pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { if let ExecutionMode::Client = self.execution_mode { - return Err(NotInRuntime); + return Err(NotInRuntime) } self.execution_mode = ExecutionMode::Client; if self.has_open_runtime_transactions() { @@ -341,11 +330,13 @@ impl OverlayedMap { } for key in self.dirty_keys.pop().ok_or(NoOpenTransaction)? { - let overlayed = self.changes.get_mut(&key).expect("\ + let overlayed = self.changes.get_mut(&key).expect( + "\ A write to an OverlayedValue is recorded in the dirty key set. Before an OverlayedValue is removed, its containing dirty set is removed. This function is only called for keys that are in the dirty set. 
qed\ - "); + ", + ); if rollback { overlayed.pop_transaction(); @@ -443,9 +434,12 @@ mod test { type Drained<'a> = Vec<(&'a [u8], Option<&'a [u8]>)>; fn assert_changes(is: &OverlayedChangeSet, expected: &Changes) { - let is: Changes = is.changes().map(|(k, v)| { - (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) - }).collect(); + let is: Changes = is + .changes() + .map(|(k, v)| { + (k.as_ref(), (v.value().map(AsRef::as_ref), v.extrinsics().into_iter().collect())) + }) + .collect(); assert_eq!(&is, expected); } @@ -453,7 +447,8 @@ mod test { let is = is.drain_commited().collect::>(); let expected = expected .iter() - .map(|(k, v)| (k.to_vec(), v.0.map(From::from))).collect::>(); + .map(|(k, v)| (k.to_vec(), v.0.map(From::from))) + .collect::>(); assert_eq!(is, expected); } @@ -461,7 +456,8 @@ mod test { let is = is.drain_commited().collect::>(); let expected = expected .iter() - .map(|(k, v)| (k.to_vec(), v.map(From::from))).collect::>(); + .map(|(k, v)| (k.to_vec(), v.map(From::from))) + .collect::>(); assert_eq!(is, expected); } @@ -474,10 +470,7 @@ mod test { changeset.set(b"key1".to_vec(), Some(b"val1".to_vec()), Some(2)); changeset.set(b"key0".to_vec(), Some(b"val0-1".to_vec()), Some(9)); - assert_drained(changeset, vec![ - (b"key0", Some(b"val0-1")), - (b"key1", Some(b"val1")), - ]); + assert_drained(changeset, vec![(b"key0", Some(b"val0-1")), (b"key1", Some(b"val1"))]); } #[test] @@ -599,10 +592,8 @@ mod test { changeset.rollback_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - let rolled_back: Changes = vec![ - (b"key0", (Some(b"val0-1"), vec![1, 10])), - (b"key1", (Some(b"val1"), vec![1])), - ]; + let rolled_back: Changes = + vec![(b"key0", (Some(b"val0-1"), vec![1, 10])), (b"key1", (Some(b"val1"), vec![1]))]; assert_changes(&changeset, &rolled_back); assert_drained_changes(changeset, rolled_back); @@ -676,21 +667,27 @@ mod test { changeset.clear_where(|k, _| k.starts_with(b"del"), Some(5)); - assert_changes(&changeset, &vec![ - (b"del1", (None, vec![3, 5])), - (b"del2", (None, vec![4, 5])), - (b"key0", (Some(b"val0"), vec![1])), - (b"key1", (Some(b"val1"), vec![2])), - ]); + assert_changes( + &changeset, + &vec![ + (b"del1", (None, vec![3, 5])), + (b"del2", (None, vec![4, 5])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ], + ); changeset.rollback_transaction().unwrap(); - assert_changes(&changeset, &vec![ - (b"del1", (Some(b"delval1"), vec![3])), - (b"del2", (Some(b"delval2"), vec![4])), - (b"key0", (Some(b"val0"), vec![1])), - (b"key1", (Some(b"val1"), vec![2])), - ]); + assert_changes( + &changeset, + &vec![ + (b"del1", (Some(b"delval1"), vec![3])), + (b"del2", (Some(b"delval2"), vec![4])), + (b"key0", (Some(b"val0"), vec![1])), + (b"key1", (Some(b"val1"), vec![2])), + ], + ); } #[test] @@ -708,29 +705,52 @@ mod test { changeset.set(b"key11".to_vec(), Some(b"val11".to_vec()), Some(11)); assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); - assert_eq!(changeset.changes_after(b"key0").next().unwrap().1.value(), Some(&b"val1".to_vec())); + assert_eq!( + changeset.changes_after(b"key0").next().unwrap().1.value(), + Some(&b"val1".to_vec()) + ); assert_eq!(changeset.changes_after(b"key1").next().unwrap().0, b"key11"); - assert_eq!(changeset.changes_after(b"key1").next().unwrap().1.value(), Some(&b"val11".to_vec())); + assert_eq!( + changeset.changes_after(b"key1").next().unwrap().1.value(), + Some(&b"val11".to_vec()) + ); 
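The invariant all of these cases lean on is the transactional layering of the changeset: writes made after `start_transaction` live in their own layer, `rollback_transaction` drops that layer wholesale, and `commit_transaction` folds it into the layer below. A condensed sketch of that lifecycle, using the same `OverlayedChangeSet` API as the surrounding tests:

let mut changeset = OverlayedChangeSet::default();
changeset.set(b"key".to_vec(), Some(b"outer".to_vec()), None);

changeset.start_transaction();
changeset.set(b"key".to_vec(), Some(b"inner".to_vec()), None);
// Rolling back discards everything written since `start_transaction`...
changeset.rollback_transaction().unwrap();
assert_eq!(changeset.get(&b"key"[..]).unwrap().value(), Some(&b"outer".to_vec()));

// ...and once no transaction is open, the surviving values can be drained.
assert_eq!(
    changeset.drain_commited().collect::<Vec<_>>(),
    vec![(b"key".to_vec(), Some(b"outer".to_vec()))],
);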
assert_eq!(changeset.changes_after(b"key11").next().unwrap().0, b"key2"); - assert_eq!(changeset.changes_after(b"key11").next().unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!( + changeset.changes_after(b"key11").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); assert_eq!(changeset.changes_after(b"key2").next().unwrap().0, b"key3"); - assert_eq!(changeset.changes_after(b"key2").next().unwrap().1.value(), Some(&b"val3".to_vec())); + assert_eq!( + changeset.changes_after(b"key2").next().unwrap().1.value(), + Some(&b"val3".to_vec()) + ); assert_eq!(changeset.changes_after(b"key3").next().unwrap().0, b"key4"); - assert_eq!(changeset.changes_after(b"key3").next().unwrap().1.value(), Some(&b"val4".to_vec())); + assert_eq!( + changeset.changes_after(b"key3").next().unwrap().1.value(), + Some(&b"val4".to_vec()) + ); assert_eq!(changeset.changes_after(b"key4").next(), None); changeset.rollback_transaction().unwrap(); assert_eq!(changeset.changes_after(b"key0").next().unwrap().0, b"key1"); - assert_eq!(changeset.changes_after(b"key0").next().unwrap().1.value(), Some(&b"val1".to_vec())); + assert_eq!( + changeset.changes_after(b"key0").next().unwrap().1.value(), + Some(&b"val1".to_vec()) + ); assert_eq!(changeset.changes_after(b"key1").next().unwrap().0, b"key2"); - assert_eq!(changeset.changes_after(b"key1").next().unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!( + changeset.changes_after(b"key1").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); assert_eq!(changeset.changes_after(b"key11").next().unwrap().0, b"key2"); - assert_eq!(changeset.changes_after(b"key11").next().unwrap().1.value(), Some(&b"val2".to_vec())); + assert_eq!( + changeset.changes_after(b"key11").next().unwrap().1.value(), + Some(&b"val2".to_vec()) + ); assert_eq!(changeset.changes_after(b"key2").next(), None); assert_eq!(changeset.changes_after(b"key3").next(), None); assert_eq!(changeset.changes_after(b"key4").next(), None); - } #[test] @@ -790,9 +810,7 @@ mod test { changeset.commit_transaction().unwrap(); assert_eq!(changeset.transaction_depth(), 0); - assert_drained(changeset, vec![ - (b"key0", Some(b"val0")), - ]); + assert_drained(changeset, vec![(b"key0", Some(b"val0"))]); } #[test] diff --git a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs index a261e084eeda927da19e5d0fc7627c900cbbca0c..a0558e06a380e126f8b5fc16b2ef445fe9ca6314 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/mod.rs @@ -20,36 +20,35 @@ mod changeset; mod offchain; +use self::changeset::OverlayedChangeSet; +use crate::{backend::Backend, stats::StateMachineStats}; pub use offchain::OffchainOverlayedChanges; -use crate::{ - backend::Backend, - stats::StateMachineStats, +use sp_std::{ + any::{Any, TypeId}, + boxed::Box, + vec::Vec, }; -use sp_std::{vec::Vec, any::{TypeId, Any}, boxed::Box}; -use self::changeset::OverlayedChangeSet; +use crate::{changes_trie::BlockNumber, DefaultError}; #[cfg(feature = "std")] use crate::{ + changes_trie::{build_changes_trie, State as ChangesTrieState}, ChangesTrieTransaction, - changes_trie::{ - build_changes_trie, - State as ChangesTrieState, - }, }; -use crate::changes_trie::BlockNumber; -#[cfg(feature = "std")] -use std::collections::{HashMap as Map, hash_map::Entry as MapEntry}; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use sp_core::{ + offchain::OffchainOverlayedChange, + 
storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, +}; +use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] use sp_std::collections::btree_map::{BTreeMap as Map, Entry as MapEntry}; use sp_std::collections::btree_set::BTreeSet; -use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; -use sp_core::offchain::OffchainOverlayedChange; -use hash_db::Hasher; -use crate::DefaultError; -use sp_externalities::{Extensions, Extension}; +#[cfg(feature = "std")] +use std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; -pub use self::changeset::{OverlayedValue, NoOpenTransaction, AlreadyInRuntime, NotInRuntime}; +pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; @@ -129,7 +128,7 @@ pub enum IndexOperation { extrinsic: u32, /// Referenced index hash. hash: Vec, - } + }, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -169,7 +168,9 @@ pub struct StorageChanges { #[cfg(feature = "std")] impl StorageChanges { /// Deconstruct into the inner values - pub fn into_inner(self) -> ( + pub fn into_inner( + self, + ) -> ( StorageCollection, ChildStorageCollection, OffchainChangesCollection, @@ -216,7 +217,9 @@ impl StorageTransactionCache Default for StorageTransactionCache { +impl Default + for StorageTransactionCache +{ fn default() -> Self { Self { transaction: None, @@ -231,7 +234,9 @@ impl Default for StorageTransactionCache } } -impl Default for StorageChanges { +impl Default + for StorageChanges +{ fn default() -> Self { Self { main_storage_changes: Default::default(), @@ -325,12 +330,10 @@ impl OverlayedChanges { self.stats.tally_write_overlay(size_write); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.set(key, val, extrinsic_index); @@ -339,19 +342,14 @@ impl OverlayedChanges { /// Clear child storage of given storage key. /// /// Can be rolled back or committed when called inside a transaction. - pub(crate) fn clear_child_storage( - &mut self, - child_info: &ChildInfo, - ) { + pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.clear_where(|_, _| true, extrinsic_index); @@ -367,20 +365,14 @@ impl OverlayedChanges { /// Removes all key-value pairs which keys share the given prefix. 
/// /// Can be rolled back or committed when called inside a transaction - pub(crate) fn clear_child_prefix( - &mut self, - child_info: &ChildInfo, - prefix: &[u8], - ) { + pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); let top = &self.top; - let (changeset, info) = self.children.entry(storage_key).or_insert_with(|| - ( - top.spawn_child(), - child_info.clone() - ) - ); + let (changeset, info) = self + .children + .entry(storage_key) + .or_insert_with(|| (top.spawn_child(), child_info.clone())); let updatable = info.try_update(child_info); debug_assert!(updatable); changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index); @@ -417,11 +409,14 @@ impl OverlayedChanges { pub fn rollback_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.rollback_transaction()?; retain_map(&mut self.children, |_, (changeset, _)| { - changeset.rollback_transaction() + changeset + .rollback_transaction() .expect("Top and children changesets are started in lockstep; qed"); !changeset.is_empty() }); - self.offchain.overlay_mut().rollback_transaction() + self.offchain + .overlay_mut() + .rollback_transaction() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -433,10 +428,13 @@ impl OverlayedChanges { pub fn commit_transaction(&mut self) -> Result<(), NoOpenTransaction> { self.top.commit_transaction()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.commit_transaction() + changeset + .commit_transaction() .expect("Top and children changesets are started in lockstep; qed"); } - self.offchain.overlay_mut().commit_transaction() + self.offchain + .overlay_mut() + .commit_transaction() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -448,10 +446,13 @@ impl OverlayedChanges { pub fn enter_runtime(&mut self) -> Result<(), AlreadyInRuntime> { self.top.enter_runtime()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.enter_runtime() + changeset + .enter_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed") } - self.offchain.overlay_mut().enter_runtime() + self.offchain + .overlay_mut() + .enter_runtime() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -463,10 +464,13 @@ impl OverlayedChanges { pub fn exit_runtime(&mut self) -> Result<(), NotInRuntime> { self.top.exit_runtime()?; for (_, (changeset, _)) in self.children.iter_mut() { - changeset.exit_runtime() + changeset + .exit_runtime() .expect("Top and children changesets are entering runtime in lockstep; qed"); } - self.offchain.overlay_mut().exit_runtime() + self.offchain + .overlay_mut() + .exit_runtime() .expect("Top and offchain changesets are started in lockstep; qed"); Ok(()) } @@ -477,19 +481,23 @@ impl OverlayedChanges { /// /// Panics: /// Panics if `transaction_depth() > 0` - fn drain_committed(&mut self) -> ( - impl Iterator)>, - impl Iterator)>, ChildInfo))>, + fn drain_committed( + &mut self, + ) -> ( + impl Iterator)>, + impl Iterator< + Item = ( + StorageKey, + (impl Iterator)>, ChildInfo), + ), + >, ) { use sp_std::mem::take; ( take(&mut self.top).drain_commited(), - take(&mut self.children).into_iter() - .map(|(key, (val, info))| ( - key, - (val.drain_commited(), info) - ) - ), + take(&mut self.children) + .into_iter() + .map(|(key, (val, info))| (key, (val.drain_commited(), info))), ) } @@ -499,24 +507,29 @@ impl 
OverlayedChanges { /// /// Panics: /// Panics if `transaction_depth() > 0` - pub fn offchain_drain_committed(&mut self) -> impl Iterator { + pub fn offchain_drain_committed( + &mut self, + ) -> impl Iterator { self.offchain.drain() } /// Get an iterator over all child changes as seen by the current transaction. - pub fn children(&self) - -> impl Iterator, &ChildInfo)> { + pub fn children( + &self, + ) -> impl Iterator, &ChildInfo)> { self.children.iter().map(|(_, v)| (v.0.changes(), &v.1)) } /// Get an iterator over all top changes as seen by the current transaction. - pub fn changes(&self) -> impl Iterator { + pub fn changes(&self) -> impl Iterator { self.top.changes() } /// Get an optional iterator over all child changes stored under the supplied key. - pub fn child_changes(&self, key: &[u8]) - -> Option<(impl Iterator, &ChildInfo)> { + pub fn child_changes( + &self, + key: &[u8], + ) -> Option<(impl Iterator, &ChildInfo)> { self.children.get(key).map(|(overlay, info)| (overlay.changes(), info)) } @@ -527,16 +540,16 @@ impl OverlayedChanges { /// Convert this instance with all changes into a [`StorageChanges`] instance. #[cfg(feature = "std")] - pub fn into_storage_changes< - B: Backend, H: Hasher, N: BlockNumber - >( + pub fn into_storage_changes, H: Hasher, N: BlockNumber>( mut self, backend: &B, changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, ) -> Result, DefaultError> - where H::Out: Ord + Encode + 'static { + where + H::Out: Ord + Encode + 'static, + { self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) } @@ -544,35 +557,34 @@ impl OverlayedChanges { pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( &mut self, backend: &B, - #[cfg(feature = "std")] - changes_trie_state: Option<&ChangesTrieState>, + #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, ) -> Result, DefaultError> - where H::Out: Ord + Encode + 'static { + where + H::Out: Ord + Encode + 'static, + { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { self.storage_root(backend, &mut cache); } - let (transaction, transaction_storage_root) = cache.transaction.take() + let (transaction, transaction_storage_root) = cache + .transaction + .take() .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) .expect("Transaction was generated as part of `storage_root`; qed"); // If the changes trie transaction does not exist, we generate it. 
#[cfg(feature = "std")] if cache.changes_trie_transaction.is_none() { - self.changes_trie_root( - backend, - changes_trie_state, - parent_hash, - false, - &mut cache, - ).map_err(|_| "Failed to generate changes trie transaction")?; + self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) + .map_err(|_| "Failed to generate changes trie transaction")?; } #[cfg(feature = "std")] - let changes_trie_transaction = cache.changes_trie_transaction + let changes_trie_transaction = cache + .changes_trie_transaction .take() .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); @@ -584,7 +596,9 @@ impl OverlayedChanges { Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), + child_storage_changes: child_storage_changes + .map(|(sk, it)| (sk, it.0.collect())) + .collect(), offchain_storage_changes, transaction, transaction_storage_root, @@ -614,7 +628,8 @@ impl OverlayedChanges { true => Some( self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) - .unwrap_or(NO_EXTRINSIC_INDEX)), + .unwrap_or(NO_EXTRINSIC_INDEX), + ), false => None, } } @@ -628,13 +643,13 @@ impl OverlayedChanges { backend: &B, cache: &mut StorageTransactionCache, ) -> H::Out - where H::Out: Ord + Encode, + where + H::Out: Ord + Encode, { let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); - let child_delta = self.children() - .map(|(changes, info)| (info, changes.map( - |(k, v)| (&k[..], v.value().map(|v| &v[..])) - ))); + let child_delta = self.children().map(|(changes, info)| { + (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) + }); let (root, transaction) = backend.full_storage_root(delta, child_delta); @@ -659,14 +674,18 @@ impl OverlayedChanges { parent_hash: H::Out, panic_on_storage_error: bool, cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Encode + 'static { + ) -> Result, ()> + where + H::Out: Ord + Encode + 'static, + { build_changes_trie::<_, H, N>( backend, changes_trie_state, self, parent_hash, panic_on_storage_error, - ).map(|r| { + ) + .map(|r| { let root = r.as_ref().map(|r| r.1).clone(); cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); cache.changes_trie_transaction_storage_root = Some(root); @@ -685,7 +704,7 @@ impl OverlayedChanges { pub fn child_iter_after( &self, storage_key: &[u8], - key: &[u8] + key: &[u8], ) -> impl Iterator { self.children .get(storage_key) @@ -716,18 +735,18 @@ impl OverlayedChanges { #[cfg(feature = "std")] fn retain_map(map: &mut Map, f: F) - where - K: std::cmp::Eq + std::hash::Hash, - F: FnMut(&K, &mut V) -> bool, +where + K: std::cmp::Eq + std::hash::Hash, + F: FnMut(&K, &mut V) -> bool, { map.retain(f); } #[cfg(not(feature = "std"))] fn retain_map(map: &mut Map, mut f: F) - where - K: Ord, - F: FnMut(&K, &mut V) -> bool, +where + K: Ord, + F: FnMut(&K, &mut V) -> bool, { let old = sp_std::mem::replace(map, Map::default()); for (k, mut v) in old.into_iter() { @@ -799,18 +818,13 @@ impl<'a> OverlayedExtensions<'a> { #[cfg(test)] mod tests { - use hex_literal::hex; - use sp_core::{Blake2Hasher, traits::Externalities}; - use crate::InMemoryBackend; - use crate::ext::Ext; use super::*; + use crate::{ext::Ext, InMemoryBackend}; + use hex_literal::hex; + use sp_core::{traits::Externalities, Blake2Hasher}; use std::collections::BTreeMap; - fn assert_extrinsics( - overlay: 
&OverlayedChangeSet, - key: impl AsRef<[u8]>, - expected: Vec, - ) { + fn assert_extrinsics(overlay: &OverlayedChangeSet, key: impl AsRef<[u8]>, expected: Vec) { assert_eq!( overlay.get(key.as_ref()).unwrap().extrinsics().into_iter().collect::>(), expected ) } @@ -863,13 +877,16 @@ mod tests { state.commit_transaction().unwrap(); } let offchain_data: Vec<_> = state.offchain_drain_committed().collect(); - let expected: Vec<_> = expected.into_iter().map(|(key, value)| { - let change = match value { - Some(value) => OffchainOverlayedChange::SetValue(value), - None => OffchainOverlayedChange::Remove, - }; - ((STORAGE_PREFIX.to_vec(), key), change) - }).collect(); + let expected: Vec<_> = expected + .into_iter() + .map(|(key, value)| { + let change = match value { + Some(value) => OffchainOverlayedChange::SetValue(value), + None => OffchainOverlayedChange::Remove, + }; + ((STORAGE_PREFIX.to_vec(), key), change) + }) + .collect(); assert_eq!(offchain_data, expected); } @@ -904,7 +921,6 @@ mod tests { check_offchain_content(&overlayed, 0, vec![(key.clone(), None)]); } - #[test] fn overlayed_storage_root_works() { let initial: BTreeMap<_, _> = vec![ (b"dog".to_vec(), b"puppyXXX".to_vec()), (b"dogglesworth".to_vec(), b"catXXX".to_vec()), (b"doug".to_vec(), b"notadog".to_vec()), - ].into_iter().collect(); + ] + .into_iter() + .collect(); let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); @@ -935,7 +953,8 @@ mod tests { crate::changes_trie::disabled_state::<_, u64>(), None, ); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); assert_eq!(&ext.storage_root()[..], &ROOT); } diff --git a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs index 4128be24bc546d4c74d692e66276ae8ad126a4cc..9603426fa55175149f49758b4c4c8a218e45337a 100644 --- a/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs +++ b/substrate/primitives/state-machine/src/overlayed_changes/offchain.rs @@ -17,9 +17,9 @@ //! Overlayed changes for offchain indexing. +use super::changeset::OverlayedMap; use sp_core::offchain::OffchainOverlayedChange; use sp_std::prelude::Vec; -use super::changeset::OverlayedMap; /// In-memory storage for offchain workers recording changes for the actual offchain storage implementation. #[derive(Debug, Clone, Default)] @@ -52,11 +52,9 @@ impl OffchainOverlayedChanges { /// Remove a key and its associated value from the offchain database. pub fn remove(&mut self, prefix: &[u8], key: &[u8]) { - let _ = self.0.set( - (prefix.to_vec(), key.to_vec()), - OffchainOverlayedChange::Remove, - None, - ); + let _ = self + .0 + .set((prefix.to_vec(), key.to_vec()), OffchainOverlayedChange::Remove, None); } /// Set the value associated with a key under a prefix to the value provided. @@ -80,7 +78,9 @@ impl OffchainOverlayedChanges { } /// Mutable reference to inner change set. 
- pub fn overlay_mut(&mut self) -> &mut OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { + pub fn overlay_mut( + &mut self, + ) -> &mut OverlayedMap<(Vec, Vec), OffchainOverlayedChange> { &mut self.0 } } @@ -120,10 +120,10 @@ mod test { let mut iter = ooc.into_iter(); assert_eq!( iter.next(), - Some( - ((STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), - OffchainOverlayedChange::SetValue(b"rrr".to_vec())) - ) + Some(( + (STORAGE_PREFIX.to_vec(), b"ppp".to_vec()), + OffchainOverlayedChange::SetValue(b"rrr".to_vec()) + )) ); assert_eq!(iter.next(), None); } diff --git a/substrate/primitives/state-machine/src/proving_backend.rs b/substrate/primitives/state-machine/src/proving_backend.rs index 5275aa82521c5b794c85875a506f0bce11e8f2be..3a242313a65c78ba27af37c40cda9e1c626ec7d2 100644 --- a/substrate/primitives/state-machine/src/proving_backend.rs +++ b/substrate/primitives/state-machine/src/proving_backend.rs @@ -17,20 +17,28 @@ //! Proving state machine backend. -use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; -use parking_lot::RwLock; -use codec::{Decode, Codec, Encode}; +use crate::{ + trie_backend::TrieBackend, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + Backend, DBValue, Error, ExecutionError, +}; +use codec::{Codec, Decode, Encode}; +use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use parking_lot::RwLock; +use sp_core::storage::ChildInfo; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, + empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + MemoryDB, StorageProof, +}; +pub use sp_trie::{ + trie_types::{Layout, TrieError}, + Recorder, +}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, }; -pub use sp_trie::{Recorder, trie_types::{Layout, TrieError}}; -use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend, DBValue}; -use sp_core::storage::ChildInfo; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -39,18 +47,15 @@ pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Has } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> - where - S: TrieBackendStorage, - H: Hasher, - H::Out: Codec, +where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, { /// Produce proof for a key query. pub fn storage(&mut self, key: &[u8]) -> Result>, String> { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e| format!("Trie lookup error: {}", e); @@ -59,25 +64,24 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> self.backend.root(), key, &mut *self.proof_recorder, - ).map_err(map_e) + ) + .map_err(map_e) } /// Produce proof for a child key query. pub fn child_storage( &mut self, child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result>, String> { let storage_key = child_info.storage_key(); - let root = self.storage(storage_key)? + let root = self + .storage(storage_key)? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or_else(|| empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e| format!("Trie lookup error: {}", e); @@ -86,17 +90,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> &eph, &root.as_ref(), key, - &mut *self.proof_recorder - ).map_err(map_e) + &mut *self.proof_recorder, + ) + .map_err(map_e) } /// Produce proof for the whole backend. pub fn record_all_keys(&mut self) { let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let mut iter = move || -> Result<(), Box>> { let root = self.backend.root(); @@ -150,13 +152,14 @@ impl ProofRecorder { /// encoded proof. pub fn estimate_encoded_size(&self) -> usize { let inner = self.inner.read(); - inner.encoded_size - + codec::Compact(inner.records.len() as u32).encoded_size() + inner.encoded_size + codec::Compact(inner.records.len() as u32).encoded_size() } /// Convert into a [`StorageProof`]. pub fn to_storage_proof(&self) -> StorageProof { - let trie_nodes = self.inner.read() + let trie_nodes = self + .inner + .read() .records .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) @@ -175,7 +178,7 @@ impl ProofRecorder { /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. -pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( +pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( TrieBackend, H>, ); @@ -186,7 +189,8 @@ pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hashe } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> - where H::Out: Codec +where + H::Out: Codec, { /// Create new proving backend. 
pub fn new(backend: &'a TrieBackend) -> Self { @@ -201,10 +205,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); - let recorder = ProofRecorderBackend { - backend: essence.backend_storage(), - proof_recorder, - }; + let recorder = ProofRecorderBackend { backend: essence.backend_storage(), proof_recorder }; ProvingBackend(TrieBackend::new(recorder, root)) } @@ -229,7 +230,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { - return Ok(v); + return Ok(v) } let backend_value = self.backend.get(key, prefix)?; @@ -247,10 +248,10 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug } impl<'a, S, H> Backend for ProvingBackend<'a, S, H> - where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + Codec, +where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + H::Out: Ord + Codec, { type Error = String; type Transaction = S::Overlay; @@ -314,7 +315,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix( child_info, prefix, f) + self.0.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -325,30 +326,32 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.keys(prefix) } - fn child_keys( - &self, - child_info: &ChildInfo, - prefix: &[u8], - ) -> Vec> { + fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec> { self.0.child_keys(child_info, prefix) } fn storage_root<'b>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { self.0.storage_root(delta) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { self.0.child_storage_root(child_info, delta) } - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::stats::UsageInfo { self.0.usage_info() @@ -375,15 +378,16 @@ where #[cfg(test)] mod tests { - use crate::InMemoryBackend; - use crate::trie_backend::tests::test_trie; use super::*; - use crate::proving_backend::create_proof_check_backend; - use sp_trie::PrefixedMemoryDB; + use crate::{ + proving_backend::create_proof_check_backend, trie_backend::tests::test_trie, + InMemoryBackend, + }; use sp_runtime::traits::BlakeTwo256; + use sp_trie::PrefixedMemoryDB; fn test_proving<'a>( - trie_backend: &'a TrieBackend,BlakeTwo256>, + trie_backend: &'a TrieBackend, BlakeTwo256>, ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { ProvingBackend::new(trie_backend) } @@ -407,7 +411,7 @@ mod tests { use sp_core::H256; let result = create_proof_check_backend::( H256::from_low_u64_be(1), - StorageProof::empty() + StorageProof::empty(), ); assert!(result.is_err()); } @@ -443,7 +447,8 @@ mod tests { let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } 
@@ -455,48 +460,38 @@ mod tests { let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_1.clone()), - (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), - (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory.full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k|(k, std::iter::empty())) - ).0; - (0..64).for_each(|i| assert_eq!( - in_memory.storage(&[i]).unwrap().unwrap(), - vec![i] - )); - (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), - vec![i] - )); + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) + }); + (10..15).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) + }); let trie = in_memory.as_trie_backend().unwrap(); let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!( - trie.storage(&[i]).unwrap().unwrap(), - vec![i] - )); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); let proving = ProvingBackend::new(trie); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert!(proof_check.storage(&[0]).is_err()); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); // note that it is include in root because proof close @@ -507,14 +502,9 @@ mod tests { assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert_eq!( - proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), - vec![64] - ); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!(proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64]); } #[test] @@ -522,15 +512,14 @@ mod tests { let trie_backend = test_trie(); let backend = test_proving(&trie_backend); - let check_estimation = |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { - let storage_proof = backend.extract_proof(); - let estimation = backend.0.essence() - .backend_storage() - .proof_recorder - .estimate_encoded_size(); + let check_estimation = + |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { + let storage_proof = backend.extract_proof(); + let estimation = + backend.0.essence().backend_storage().proof_recorder.estimate_encoded_size(); 
-			assert_eq!(storage_proof.encoded_size(), estimation);
-		};
+				assert_eq!(storage_proof.encoded_size(), estimation);
+			};
 
 		assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec()));
 		check_estimation(&backend);
diff --git a/substrate/primitives/state-machine/src/read_only.rs b/substrate/primitives/state-machine/src/read_only.rs
index 01e1fb6b5b2f57d87802c8e37d284d44401309d2..5b7d568b0311e0827689b8e0290b337d9486a53f 100644
--- a/substrate/primitives/state-machine/src/read_only.rs
+++ b/substrate/primitives/state-machine/src/read_only.rs
@@ -17,17 +17,18 @@
 //! Read-only version of Externalities.
 
-use std::{
-	any::{TypeId, Any},
-	marker::PhantomData,
-};
 use crate::{Backend, StorageKey, StorageValue};
+use codec::Encode;
 use hash_db::Hasher;
 use sp_core::{
 	storage::{ChildInfo, TrackedStorageKey},
-	traits::Externalities, Blake2Hasher,
+	traits::Externalities,
+	Blake2Hasher,
+};
+use std::{
+	any::{Any, TypeId},
+	marker::PhantomData,
 };
-use codec::Encode;
 
 /// Trait for inspecting state in any backend.
 ///
@@ -79,39 +80,34 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B>
 	}
 
 	fn storage(&self, key: &[u8]) -> Option<StorageValue> {
-		self.backend.storage(key).expect("Backed failed for storage in ReadOnlyExternalities")
+		self.backend
+			.storage(key)
+			.expect("Backend failed for storage in ReadOnlyExternalities")
 	}
 
 	fn storage_hash(&self, key: &[u8]) -> Option<Vec<u8>> {
 		self.storage(key).map(|v| Blake2Hasher::hash(&v).encode())
 	}
 
-	fn child_storage(
-		&self,
-		child_info: &ChildInfo,
-		key: &[u8],
-	) -> Option<StorageValue> {
-		self.backend.child_storage(child_info, key).expect("Backed failed for child_storage in ReadOnlyExternalities")
+	fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option<StorageValue> {
+		self.backend
+			.child_storage(child_info, key)
+			.expect("Backend failed for child_storage in ReadOnlyExternalities")
 	}
 
-	fn child_storage_hash(
-		&self,
-		child_info: &ChildInfo,
-		key: &[u8],
-	) -> Option<Vec<u8>> {
+	fn child_storage_hash(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Vec<u8>> {
 		self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode())
 	}
 
 	fn next_storage_key(&self, key: &[u8]) -> Option<StorageKey> {
-		self.backend.next_storage_key(key).expect("Backed failed for next_storage_key in ReadOnlyExternalities")
+		self.backend
+			.next_storage_key(key)
+			.expect("Backend failed for next_storage_key in ReadOnlyExternalities")
 	}
 
-	fn next_child_storage_key(
-		&self,
-		child_info: &ChildInfo,
-		key: &[u8],
-	) -> Option<StorageKey> {
-		self.backend.next_child_storage_key(child_info, key)
+	fn next_child_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Option<StorageKey> {
+		self.backend
+			.next_child_storage_key(child_info, key)
 			.expect("Backend failed for next_child_storage_key in ReadOnlyExternalities")
 	}
 
@@ -128,11 +124,7 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B>
 		unimplemented!("place_child_storage not supported in ReadOnlyExternalities")
 	}
 
-	fn kill_child_storage(
-		&mut self,
-		_child_info: &ChildInfo,
-		_limit: Option<u32>,
-	) -> (bool, u32) {
+	fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option<u32>) -> (bool, u32) {
 		unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities")
 	}
 
@@ -149,11 +141,7 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<'a, H, B>
 		unimplemented!("clear_child_prefix is not supported in ReadOnlyExternalities")
 	}
 
-	fn storage_append(
-		&mut self,
-		_key: Vec<u8>,
-		_value: Vec<u8>,
-	) {
+	fn storage_append(&mut self, _key: Vec<u8>, _value: Vec<u8>) {
 		unimplemented!("storage_append is not
supported in ReadOnlyExternalities") } @@ -161,10 +149,7 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_root is not supported in ReadOnlyExternalities") } - fn child_storage_root( - &mut self, - _child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } @@ -209,7 +194,9 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< } } -impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for ReadOnlyExternalities<'a, H, B> { +impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore + for ReadOnlyExternalities<'a, H, B> +{ fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { unimplemented!("extension_by_type_id is not supported in ReadOnlyExternalities") } @@ -222,7 +209,10 @@ impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for Rea unimplemented!("register_extension_with_type_id is not supported in ReadOnlyExternalities") } - fn deregister_extension_by_type_id(&mut self, _type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + _type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { unimplemented!("deregister_extension_by_type_id is not supported in ReadOnlyExternalities") } } diff --git a/substrate/primitives/state-machine/src/stats.rs b/substrate/primitives/state-machine/src/stats.rs index 9d4ac27e5e94f648ed01df8c0829f903924c1a21..affd71f9d2e5dfaf4b6edd695fd4ada56d83ca7b 100644 --- a/substrate/primitives/state-machine/src/stats.rs +++ b/substrate/primitives/state-machine/src/stats.rs @@ -17,9 +17,9 @@ //! Usage statistics for state db -#[cfg(feature = "std")] -use std::time::{Instant, Duration}; use sp_std::cell::RefCell; +#[cfg(feature = "std")] +use std::time::{Duration, Instant}; /// Measured count of operations and total bytes. #[derive(Clone, Debug, Default)] diff --git a/substrate/primitives/state-machine/src/testing.rs b/substrate/primitives/state-machine/src/testing.rs index 363d543da086fd168eeea26b8dc2ef2e6060b9a0..ec1772ba8666f53ba9e22c195a1f59ade38b9895 100644 --- a/substrate/primitives/state-machine/src/testing.rs +++ b/substrate/primitives/state-machine/src/testing.rs @@ -17,17 +17,19 @@ //! Test implementation for Externalities. 
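The stats.rs hunk above and the testing.rs hunks below are typical of the whole patch: `use` items merged per crate and sorted, signatures and struct literals re-wrapped at 100 columns. The repository's rustfmt configuration is not part of this excerpt, but rewrites like these correspond to standard (partly nightly-only) rustfmt options, for example `imports_granularity = "Crate"`, `reorder_imports = true`, `use_field_init_shorthand = true`, `match_block_trailing_comma = true` and `trailing_semicolon = false`, together with `max_width = 100` and hard tabs; reading the options off the output like this is an inference, not a quote of the actual rustfmt.toml. On toy code the round trip looks like:

// Before formatting (as the `-` lines in these hunks are written):
//
//     fn make(key: Vec<u8>) -> Tracked {
//         return Tracked { key: key };
//     }
//
// After formatting: field-init shorthand applied and the `;` after
// `return` dropped.
struct Tracked {
	key: Vec<u8>,
}

fn make(key: Vec<u8>) -> Tracked {
	return Tracked { key }
}

fn main() {
	assert_eq!(make(vec![1]).key, vec![1]);
}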
-use std::{any::{Any, TypeId}, panic::{AssertUnwindSafe, UnwindSafe}}; +use std::{ + any::{Any, TypeId}, + panic::{AssertUnwindSafe, UnwindSafe}, +}; use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, - StorageKey, StorageValue, + backend::Backend, changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as ChangesTrieInMemoryStorage, - BlockNumber as ChangesTrieBlockNumber, - State as ChangesTrieState, + BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, + InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, }, + ext::Ext, + InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, }; use codec::Decode; @@ -35,13 +37,13 @@ use hash_db::Hasher; use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ - well_known_keys::{CHANGES_TRIE_CONFIG, CODE, is_child_storage_key}, + well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, Storage, }, - traits::TaskExecutorExt, testing::TaskExecutor, + traits::TaskExecutorExt, }; -use sp_externalities::{Extensions, Extension, ExtensionStore}; +use sp_externalities::{Extension, ExtensionStore, Extensions}; /// Simple HashMap-based Externalities impl. pub struct TestExternalities @@ -96,7 +98,9 @@ where /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) + let changes_trie_config = storage + .top + .get(CHANGES_TRIE_CONFIG) .and_then(|v| Decode::decode(&mut &v[..]).ok()); overlay.set_collect_extrinsics(changes_trie_config.is_some()); @@ -156,17 +160,14 @@ where /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open /// transactions. fn as_backend(&self) -> InMemoryBackend { - let top: Vec<_> = self.overlay.changes() - .map(|(k, v)| (k.clone(), v.value().cloned())) - .collect(); + let top: Vec<_> = + self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); let mut transaction = vec![(None, top)]; for (child_changes, child_info) in self.overlay.children() { transaction.push(( Some(child_info.clone()), - child_changes - .map(|(k, v)| (k.clone(), v.value().cloned())) - .collect(), + child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), )) } @@ -186,7 +187,8 @@ where &mut Default::default(), )?; - self.backend.apply_transaction(changes.transaction_storage_root, changes.transaction); + self.backend + .apply_transaction(changes.transaction_storage_root, changes.transaction); Ok(()) } @@ -202,18 +204,21 @@ where /// /// Returns the result of the given closure, if no panics occured. /// Otherwise, returns `Err`. 
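`execute_with_safe`, reformatted just below, exists so a test can survive a panicking closure: the externalities are wrapped in `AssertUnwindSafe`, the closure runs under `std::panic::catch_unwind`, and a panic is surfaced as an `Err` string instead of aborting the test. A stripped-down sketch of the same pattern, with `run_safely` and the `Vec<u8>` state as illustrative stand-ins:

use std::panic::{catch_unwind, AssertUnwindSafe};

// Run `f` against mutable state, converting a panic into `Err`.
// `AssertUnwindSafe` is the caller's promise that the state is not left
// logically corrupt when `f` unwinds.
fn run_safely<R>(
	state: &mut Vec<u8>,
	f: impl FnOnce(&mut Vec<u8>) -> R,
) -> Result<R, String> {
	let wrapped = AssertUnwindSafe((state, f));
	catch_unwind(move || {
		let AssertUnwindSafe((state, f)) = wrapped;
		f(state)
	})
	.map_err(|e| format!("Closure panicked: {:?}", e))
}

fn main() {
	let mut log = Vec::new();
	assert!(run_safely(&mut log, |l| l.push(1)).is_ok());
	assert!(run_safely(&mut log, |_| panic!("boom")).is_err());
	assert_eq!(log, vec![1]);
}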
- pub fn execute_with_safe(&mut self, f: impl FnOnce() -> R + UnwindSafe) -> Result { + pub fn execute_with_safe( + &mut self, + f: impl FnOnce() -> R + UnwindSafe, + ) -> Result { let mut ext = AssertUnwindSafe(self.ext()); - std::panic::catch_unwind(move || + std::panic::catch_unwind(move || { sp_externalities::set_and_run_with_externalities(&mut *ext, f) - ).map_err(|e| { - format!("Closure panicked: {:?}", e) }) + .map_err(|e| format!("Closure panicked: {:?}", e)) } } impl std::fmt::Debug for TestExternalities - where H::Out: Ord + codec::Codec, +where + H::Out: Ord + codec::Codec, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, self.backend.pairs()) @@ -221,8 +226,8 @@ impl std::fmt::Debug for TestExternalities } impl PartialEq for TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contains the /// same data at this state @@ -232,22 +237,25 @@ impl PartialEq for TestExternalities } impl Default for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + H::Out: Ord + 'static + codec::Codec, { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + H::Out: Ord + 'static + codec::Codec, { fn from(storage: Storage) -> Self { Self::new(storage) } } -impl sp_externalities::ExtensionStore for TestExternalities where +impl sp_externalities::ExtensionStore for TestExternalities +where H: Hasher, H::Out: Ord + codec::Codec, N: ChangesTrieBlockNumber, @@ -264,7 +272,10 @@ impl sp_externalities::ExtensionStore for TestExternalities where self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { @@ -274,14 +285,13 @@ impl sp_externalities::ExtensionStore for TestExternalities where } impl sp_externalities::ExternalitiesExt for TestExternalities - where - H: Hasher, - H::Out: Ord + codec::Codec, - N: ChangesTrieBlockNumber, +where + H: Hasher, + H::Out: Ord + codec::Codec, + N: ChangesTrieBlockNumber, { fn extension(&mut self) -> Option<&mut T> { - self.extension_by_type_id(TypeId::of::()) - .and_then(::downcast_mut) + self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) } fn register_extension(&mut self, ext: T) -> Result<(), sp_externalities::Error> { @@ -296,9 +306,9 @@ impl sp_externalities::ExternalitiesExt for TestExternalities #[cfg(test)] mod tests { use super::*; - use sp_core::{H256, traits::Externalities, storage::ChildInfo}; - use sp_runtime::traits::BlakeTwo256; use hex_literal::hex; + use sp_core::{storage::ChildInfo, traits::Externalities, H256}; + use sp_runtime::traits::BlakeTwo256; #[test] fn commit_should_work() { @@ -307,7 +317,8 @@ mod tests { ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - let root = H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); + let root = + H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); 
assert_eq!(H256::from_slice(ext.storage_root().as_slice()), root); } @@ -325,7 +336,7 @@ mod tests { #[test] fn check_send() { fn assert_send() {} - assert_send::>(); + assert_send::>(); } #[test] diff --git a/substrate/primitives/state-machine/src/trie_backend.rs b/substrate/primitives/state-machine/src/trie_backend.rs index 6162a9866a46ce844b8a72ae60b02958e63d8cd8..e8c9fa475cffd6fc862f1da1cbccde39fb4db20a 100644 --- a/substrate/primitives/state-machine/src/trie_backend.rs +++ b/substrate/primitives/state-machine/src/trie_backend.rs @@ -17,29 +17,33 @@ //! Trie-based state machine backend. -use crate::{warn, debug}; -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; -use codec::{Codec, Decode}; use crate::{ - StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + debug, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + warn, Backend, StorageKey, StorageValue, }; +use codec::{Codec, Decode}; +use hash_db::Hasher; +use sp_core::storage::{ChildInfo, ChildType}; use sp_std::{boxed::Box, vec::Vec}; +use sp_trie::{ + child_delta_trie_root, delta_trie_root, empty_child_trie_root, + trie_types::{Layout, TrieDB, TrieError}, + Trie, +}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { - pub (crate) essence: TrieBackendEssence, + pub(crate) essence: TrieBackendEssence, } -impl, H: Hasher> TrieBackend where H::Out: Codec { +impl, H: Hasher> TrieBackend +where + H::Out: Codec, +{ /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { - essence: TrieBackendEssence::new(storage, root), - } + TrieBackend { essence: TrieBackendEssence::new(storage, root) } } /// Get backend essence reference. 
@@ -74,7 +78,8 @@ impl, H: Hasher> sp_std::fmt::Debug for TrieBackend, H: Hasher> Backend for TrieBackend where +impl, H: Hasher> Backend for TrieBackend +where H::Out: Ord + Codec, { type Error = crate::DefaultError; @@ -121,7 +126,8 @@ impl, H: Hasher> Backend for TrieBackend where f: F, allow_missing: bool, ) -> Result { - self.essence.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) + self.essence + .apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing) } fn apply_to_keys_while bool>( @@ -159,7 +165,7 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => { debug!(target: "trie", "Error extracting trie values: {}", e); Vec::new() - } + }, } } @@ -177,21 +183,23 @@ impl, H: Hasher> Backend for TrieBackend where Ok(v) }; - collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() + collect_all() + .map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)) + .unwrap_or_default() } fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { Ok(ret) => root = ret, @@ -205,17 +213,21 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => empty_child_trie_root::>() + ChildType::ParentKeyId => empty_child_trie_root::>(), }; let mut write_overlay = S::Overlay::default(); let prefixed_storage_key = child_info.prefixed_storage_key(); let mut root = match self.storage(prefixed_storage_key.as_slice()) { - Ok(value) => - value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or_else(|| default_root.clone()), + Ok(value) => value + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .unwrap_or_else(|| default_root.clone()), Err(e) => { warn!(target: "trie", "Failed to read child storage root: {}", e); default_root.clone() @@ -223,10 +235,7 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); match child_delta_trie_root::, _, _, _, _, _, _>( child_info.keyspace(), @@ -248,7 +257,7 @@ impl, H: Hasher> Backend for TrieBackend where Some(self) } - fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } + fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} fn usage_info(&self) -> crate::UsageInfo { crate::UsageInfo::empty() @@ -261,12 +270,12 @@ impl, H: Hasher> Backend for TrieBackend where #[cfg(test)] pub mod tests { - use std::{collections::HashSet, iter}; - use sp_core::H256; + use super::*; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_core::H256; use sp_runtime::traits::BlakeTwo256; - use super::*; + use 
sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; @@ -312,7 +321,9 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), + test_trie + .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") + .unwrap(), Some(vec![142u8]), ); } @@ -332,7 +343,9 @@ pub mod tests { assert!(TrieBackend::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), - ).pairs().is_empty()); + ) + .pairs() + .is_empty()); } #[test] @@ -347,9 +360,8 @@ pub mod tests { #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root( - iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), - ); + let (new_root, mut tx) = + test_trie().storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie().storage_root(iter::empty()).0); } diff --git a/substrate/primitives/state-machine/src/trie_backend_essence.rs b/substrate/primitives/state-machine/src/trie_backend_essence.rs index 54124e6754a52b768c2b18926455dca8697c6c7d..06a99f938803919567e27893023e5caff27964f2 100644 --- a/substrate/primitives/state-machine/src/trie_backend_essence.rs +++ b/substrate/primitives/state-machine/src/trie_backend_essence.rs @@ -18,24 +18,24 @@ //! Trie-based state machine backend essence used to read values //! from storage. -#[cfg(feature = "std")] -use std::sync::Arc; -use sp_std::{ops::Deref, boxed::Box, vec::Vec}; -use crate::{warn, debug}; +use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; +use codec::Encode; use hash_db::{self, Hasher, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - empty_child_trie_root, read_trie_value, read_child_trie_value, - KeySpacedDB, TrieDBIterator}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; -use codec::Encode; +use sp_std::{boxed::Box, ops::Deref, vec::Vec}; +use sp_trie::{ + empty_child_trie_root, read_child_trie_value, read_trie_value, + trie_types::{Layout, TrieDB, TrieError}, + DBValue, KeySpacedDB, MemoryDB, PrefixedMemoryDB, Trie, TrieDBIterator, +}; +#[cfg(feature = "std")] +use std::sync::Arc; #[cfg(not(feature = "std"))] macro_rules! format { - ($($arg:tt)+) => ( + ($($arg:tt)+) => { crate::DefaultError - ); + }; } type Result = sp_std::result::Result; @@ -53,14 +53,13 @@ pub struct TrieBackendEssence, H: Hasher> { empty: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence +where + H::Out: Encode, +{ /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackendEssence { - storage, - root, - empty: H::hash(&[0u8]), - } + TrieBackendEssence { storage, root, empty: H::hash(&[0u8]) } } /// Get backend storage reference. @@ -114,7 +113,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut hash = H::Out::default(); if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())) } // note: child_root and hash must be same size, panics otherwise. 
hash.as_mut().copy_from_slice(&child_root[..]); @@ -138,10 +137,9 @@ impl, H: Hasher> TrieBackendEssence where H::Out: dyn_eph = self; } - let trie = TrieDB::::new(dyn_eph, root) - .map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.iter() - .map_err(|e| format!("TrieDB iteration error: {}", e))?; + let trie = + TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let mut iter = trie.iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; // The key just after the one given in input, basically `key++0`. // Note: We are sure this is the next key if: @@ -157,8 +155,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let next_element = iter.next(); let next_key = if let Some(next_element) = next_element { - let (next_key, _) = next_element - .map_err(|e| format!("TrieDB iterator next error: {}", e))?; + let (next_key, _) = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; Some(next_key) } else { None @@ -180,7 +178,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: &ChildInfo, key: &[u8], ) -> Result> { - let root = self.child_root(child_info)? + let root = self + .child_root(child_info)? .unwrap_or_else(|| empty_child_trie_root::>().encode()); let map_e = |e| format!("Trie lookup error: {}", e); @@ -210,20 +209,13 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &child_root } else { - return Ok(true); + return Ok(true) } } else { &self.root }; - self.trie_iter_inner( - &root, - prefix, - f, - child_info, - start_at, - allow_missing_nodes, - ) + self.trie_iter_inner(&root, prefix, f, child_info, start_at, allow_missing_nodes) } /// Retrieve all entries keys of a storage and call `f` for each of those keys. @@ -240,8 +232,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } + return + }, }; child_root.as_mut().copy_from_slice(&root_vec); &child_root @@ -249,7 +241,17 @@ impl, H: Hasher> TrieBackendEssence where H::Out: &self.root }; - let _ = self.trie_iter_inner(root, prefix, |k, _v| { f(&k); true}, child_info, None, false); + let _ = self.trie_iter_inner( + root, + prefix, + |k, _v| { + f(&k); + true + }, + child_info, + None, + false, + ); } /// Execute given closure for all keys starting with prefix. @@ -263,17 +265,37 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } + return + }, }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - let _ = self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(&k); true }, Some(child_info), None, false); + let _ = self.trie_iter_inner( + &root, + Some(prefix), + |k, _v| { + f(&k); + true + }, + Some(child_info), + None, + false, + ); } /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(&k); true }, None, None, false); + let _ = self.trie_iter_inner( + &self.root, + Some(prefix), + |k, _v| { + f(&k); + true + }, + None, + None, + false, + ); } fn trie_iter_inner, Vec) -> bool>( @@ -315,14 +337,25 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; match result { Ok(completed) => Ok(completed), - Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => Ok(false), + Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => + Ok(false), Err(e) => Err(format!("TrieDB iteration error: {}", e)), } } /// Execute given closure for all key and values starting with prefix. pub fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { - let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, v| {f(&k, &v); true}, None, None, false); + let _ = self.trie_iter_inner( + &self.root, + Some(prefix), + |k, v| { + f(&k, &v); + true + }, + None, + None, + false, + ); } } @@ -334,16 +367,17 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + self + } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { - Ephemeral { - storage, - overlay, - } + Ephemeral { storage, overlay } } } @@ -431,13 +465,15 @@ impl TrieBackendStorage for MemoryDB { impl, H: Hasher> hash_db::AsHashDB for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + self + } } -impl, H: Hasher> hash_db::HashDB - for TrieBackendEssence -{ +impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -480,12 +516,11 @@ impl, H: Hasher> hash_db::HashDBRef } } - #[cfg(test)] mod test { - use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; + use sp_core::{Blake2Hasher, H256}; + use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -529,20 +564,10 @@ mod test { let mdb = essence_1.into_storage(); let essence_2 = TrieBackendEssence::new(mdb, root_2); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(child_info, b"6"), Ok(None) - ); + assert_eq!(essence_2.next_child_storage_key(child_info, b"2"), 
Ok(Some(b"3".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_2.next_child_storage_key(child_info, b"6"), Ok(None)); } } diff --git a/substrate/primitives/std/src/lib.rs b/substrate/primitives/std/src/lib.rs index 6acf4b75967aed8fb270cc54681a4f43285c1181..3af4d07ac6297ef01b50fd8525399ad775ec85c9 100644 --- a/substrate/primitives/std/src/lib.rs +++ b/substrate/primitives/std/src/lib.rs @@ -19,11 +19,14 @@ //! or client/alloc to be used with any code that depends on the runtime. #![cfg_attr(not(feature = "std"), no_std)] - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] #[macro_export] macro_rules! map { @@ -55,7 +58,7 @@ macro_rules! if_std { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! if_std { - ( $( $code:tt )* ) => {} + ( $( $code:tt )* ) => {}; } #[cfg(feature = "std")] @@ -64,7 +67,6 @@ include!("../with_std.rs"); #[cfg(not(feature = "std"))] include!("../without_std.rs"); - /// A target for `core::write!` macro - constructs a string in memory. #[derive(Default)] pub struct Writer(vec::Vec); @@ -92,10 +94,12 @@ impl Writer { /// /// This should include only things which are in the normal std prelude. pub mod prelude { - pub use crate::vec::Vec; - pub use crate::boxed::Box; - pub use crate::cmp::{Eq, PartialEq, Reverse}; - pub use crate::clone::Clone; + pub use crate::{ + boxed::Box, + clone::Clone, + cmp::{Eq, PartialEq, Reverse}, + vec::Vec, + }; // Re-export `vec!` macro here, but not in `std` mode, since // std's prelude already brings `vec!` into the scope. diff --git a/substrate/primitives/storage/src/lib.rs b/substrate/primitives/storage/src/lib.rs index 87c10f770a8abb118299026868a8cbc334c94490..45474a44693ab1d22f0a0e384ba1d7fa902ca47e 100644 --- a/substrate/primitives/storage/src/lib.rs +++ b/substrate/primitives/storage/src/lib.rs @@ -20,16 +20,22 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, ops::{Deref, DerefMut}}; +use codec::{Decode, Encode}; use ref_cast::RefCast; -use codec::{Encode, Decode}; +use sp_std::{ + ops::{Deref, DerefMut}, + vec::Vec, +}; /// Storage key. 
#[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode) +)] pub struct StorageKey( #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); @@ -53,12 +59,7 @@ pub struct TrackedStorageKey { impl TrackedStorageKey { /// Create a default `TrackedStorageKey` pub fn new(key: Vec) -> Self { - Self { - key, - reads: 0, - writes: 0, - whitelisted: false, - } + Self { key, reads: 0, writes: 0, whitelisted: false } } /// Check if this key has been "read", i.e. it exists in the memory overlay. /// @@ -90,12 +91,7 @@ impl TrackedStorageKey { // Easily convert a key to a `TrackedStorageKey` that has been whitelisted. impl From> for TrackedStorageKey { fn from(key: Vec) -> Self { - Self { - key: key, - reads: 0, - writes: 0, - whitelisted: true, - } + Self { key, reads: 0, writes: 0, whitelisted: true } } } @@ -105,8 +101,7 @@ impl From> for TrackedStorageKey { #[repr(transparent)] #[derive(RefCast)] pub struct PrefixedStorageKey( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] Vec, ); impl Deref for PrefixedStorageKey { @@ -235,7 +230,6 @@ pub mod well_known_keys { CHILD_STORAGE_KEY_PREFIX.starts_with(key) } } - } /// Information related to a child state. @@ -257,9 +251,7 @@ impl ChildInfo { /// Same as `new_default` but with `Vec` as input. pub fn new_default_from_vec(storage_key: Vec) -> Self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data: storage_key, - }) + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key }) } /// Try to update with another instance, return false if both instance @@ -284,9 +276,7 @@ impl ChildInfo { /// child trie. pub fn storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data, - }) => &data[..], + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => &data[..], } } @@ -294,9 +284,8 @@ impl ChildInfo { /// this trie. pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - data, - }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => + ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), } } @@ -304,9 +293,7 @@ impl ChildInfo { /// this trie. pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { - ChildInfo::ParentKeyId(ChildTrieParentKeyId { - mut data, - }) => { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); PrefixedStorageKey(data) }, diff --git a/substrate/primitives/tasks/src/async_externalities.rs b/substrate/primitives/tasks/src/async_externalities.rs index 8402246cb4e2968689bbbc4136cf266076f530c4..975a81af4f53d689422b709d5547c82f1fc89761 100644 --- a/substrate/primitives/tasks/src/async_externalities.rs +++ b/substrate/primitives/tasks/src/async_externalities.rs @@ -18,12 +18,12 @@ //! Async externalities. 
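`AsyncExternalities`, whose hunks follow, is deliberately stateless: every storage accessor panics, because a spawned runtime task may only compute over the payload it was handed, never over consensus state. A hedged usage sketch against the `spawn`/`join` API exactly as it appears in the sp-tasks hunks further down (`double_all` is an illustrative entry point, not from the source; the call must run inside an externalities context that carries a task-executor extension, as the crate's tests do):

// Entry points are plain `fn(Vec<u8>) -> Vec<u8>`: all input travels in
// the payload and the result comes back through the join handle, so the
// task needs no storage access at all.
fn double_all(data: Vec<u8>) -> Vec<u8> {
	data.into_iter().map(|b| b.wrapping_mul(2)).collect()
}

fn compute_in_parallel() -> Vec<u8> {
	// `spawn` runs the entry point on another worker (or, in the wasm
	// path, via the dispatch wrapper shown later); `join` blocks until
	// the result is sent back. Requires the sp-tasks crate.
	let handle = sp_tasks::spawn(double_all, vec![1, 2, 3]);
	handle.join()
}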
-use std::any::{TypeId, Any}; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, - traits::{Externalities, SpawnNamed, TaskExecutorExt, RuntimeSpawnExt, RuntimeSpawn}, + traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, }; use sp_externalities::{Extensions, ExternalitiesExt as _}; +use std::any::{Any, TypeId}; /// Simple state-less externalities for use in async context. /// @@ -34,7 +34,9 @@ pub struct AsyncExternalities { } /// New Async externalities. -pub fn new_async_externalities(scheduler: Box) -> Result { +pub fn new_async_externalities( + scheduler: Box, +) -> Result { let mut res = AsyncExternalities { extensions: Default::default() }; let mut ext = &mut res as &mut dyn Externalities; ext.register_extension::(TaskExecutorExt(scheduler.clone())) @@ -74,19 +76,11 @@ impl Externalities for AsyncExternalities { panic!("`storage_hash`: should not be used in async externalities!") } - fn child_storage( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option { + fn child_storage(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { panic!("`child_storage`: should not be used in async externalities!") } - fn child_storage_hash( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option> { + fn child_storage_hash(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option> { panic!("`child_storage_hash`: should not be used in async externalities!") } @@ -94,11 +88,7 @@ impl Externalities for AsyncExternalities { panic!("`next_storage_key`: should not be used in async externalities!") } - fn next_child_storage_key( - &self, - _child_info: &ChildInfo, - _key: &[u8], - ) -> Option { + fn next_child_storage_key(&self, _child_info: &ChildInfo, _key: &[u8]) -> Option { panic!("`next_child_storage_key`: should not be used in async externalities!") } @@ -115,11 +105,7 @@ impl Externalities for AsyncExternalities { panic!("`place_child_storage`: should not be used in async externalities!") } - fn kill_child_storage( - &mut self, - _child_info: &ChildInfo, - _limit: Option, - ) -> (bool, u32) { + fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option) -> (bool, u32) { panic!("`kill_child_storage`: should not be used in async externalities!") } @@ -136,11 +122,7 @@ impl Externalities for AsyncExternalities { panic!("`clear_child_prefix`: should not be used in async externalities!") } - fn storage_append( - &mut self, - _key: Vec, - _value: Vec, - ) { + fn storage_append(&mut self, _key: Vec, _value: Vec) { panic!("`storage_append`: should not be used in async externalities!") } @@ -148,10 +130,7 @@ impl Externalities for AsyncExternalities { panic!("`storage_root`: should not be used in async externalities!") } - fn child_storage_root( - &mut self, - _child_info: &ChildInfo, - ) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { panic!("`child_storage_root`: should not be used in async externalities!") } @@ -209,7 +188,10 @@ impl sp_externalities::ExtensionStore for AsyncExternalities { self.extensions.register_with_type_id(type_id, extension) } - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { if self.extensions.deregister(type_id) { Ok(()) } else { diff --git a/substrate/primitives/tasks/src/lib.rs b/substrate/primitives/tasks/src/lib.rs index 96aca0e1cef6b70e1a4027bbd84fefc095c0749a..e9c80ae5ff4c81a0b33eaf7f05010e973ab9dd68 100644 
--- a/substrate/primitives/tasks/src/lib.rs +++ b/substrate/primitives/tasks/src/lib.rs @@ -49,7 +49,6 @@ //! //! When allowing unbounded parallelism, malicious transactions can exploit it and partition //! network consensus based on how much resources nodes have. -//! #![cfg_attr(not(feature = "std"), no_std)] @@ -61,9 +60,9 @@ pub use async_externalities::{new_async_externalities, AsyncExternalities}; #[cfg(feature = "std")] mod inner { - use std::{panic::AssertUnwindSafe, sync::mpsc}; - use sp_externalities::ExternalitiesExt as _; use sp_core::traits::TaskExecutorExt; + use sp_externalities::ExternalitiesExt as _; + use std::{panic::AssertUnwindSafe, sync::mpsc}; /// Task handle (wasm). /// @@ -77,55 +76,62 @@ mod inner { impl DataJoinHandle { /// Join handle returned by `spawn` function pub fn join(self) -> Vec { - self.receiver.recv().expect("Spawned runtime task terminated before sending result.") + self.receiver + .recv() + .expect("Spawned runtime task terminated before sending result.") } } /// Spawn new runtime task (native). pub fn spawn(entry_point: fn(Vec) -> Vec, data: Vec) -> DataJoinHandle { - let scheduler = sp_externalities::with_externalities(|mut ext| ext.extension::() - .expect("No task executor associated with the current context!") - .clone() - ).expect("Spawn called outside of externalities context!"); + let scheduler = sp_externalities::with_externalities(|mut ext| { + ext.extension::() + .expect("No task executor associated with the current context!") + .clone() + }) + .expect("Spawn called outside of externalities context!"); let (sender, receiver) = mpsc::channel(); let extra_scheduler = scheduler.clone(); - scheduler.spawn("parallel-runtime-spawn", Box::pin(async move { - let result = match crate::new_async_externalities(extra_scheduler) { - Ok(mut ext) => { - let mut ext = AssertUnwindSafe(&mut ext); - match std::panic::catch_unwind(move || { - sp_externalities::set_and_run_with_externalities( - &mut **ext, - move || entry_point(data), - ) - }) { - Ok(result) => result, - Err(panic) => { - log::error!( - target: "runtime", - "Spawned task panicked: {:?}", - panic, - ); - - // This will drop sender without sending anything. - return; + scheduler.spawn( + "parallel-runtime-spawn", + Box::pin(async move { + let result = match crate::new_async_externalities(extra_scheduler) { + Ok(mut ext) => { + let mut ext = AssertUnwindSafe(&mut ext); + match std::panic::catch_unwind(move || { + sp_externalities::set_and_run_with_externalities( + &mut **ext, + move || entry_point(data), + ) + }) { + Ok(result) => result, + Err(panic) => { + log::error!( + target: "runtime", + "Spawned task panicked: {:?}", + panic, + ); + + // This will drop sender without sending anything. + return + }, } - } - }, - Err(e) => { - log::error!( - target: "runtime", - "Unable to run async task: {}", - e, - ); - - return; - }, - }; - - let _ = sender.send(result); - })); + }, + Err(e) => { + log::error!( + target: "runtime", + "Unable to run async task: {}", + e, + ); + + return + }, + }; + + let _ = sender.send(result); + }), + ); DataJoinHandle { receiver } } @@ -146,7 +152,11 @@ mod inner { /// /// NOTE: Since this dynamic dispatch function and the invoked function are compiled with /// the same compiler, there should be no problem with ABI incompatibility. 
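The wasm-side `spawn`, reformatted below, cannot pass a typed function across the host boundary, so it ships the entry point as a raw integer and `dispatch_wrapper` rebuilds the `fn` pointer on the other side; as the note above says, this is sound only because both sides are the same compiled artifact. The trick in isolation (a same-process toy with no host boundary, purely illustrative):

// Round-trip a function pointer through `usize`, as `spawn` and
// `dispatch_wrapper` do across the runtime/host boundary.
fn entry(data: Vec<u8>) -> Vec<u8> {
	data
}

fn main() {
	let func_ptr: usize = entry as usize;
	// Rebuild the typed pointer; sound only when the address really is
	// a `fn(Vec<u8>) -> Vec<u8>` from this same binary.
	let f: fn(Vec<u8>) -> Vec<u8> = unsafe { std::mem::transmute(func_ptr) };
	assert_eq!(f(vec![7]), vec![7]);
}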
- extern "C" fn dispatch_wrapper(func_ref: *const u8, payload_ptr: *mut u8, payload_len: u32) -> u64 { + extern "C" fn dispatch_wrapper( + func_ref: *const u8, + payload_ptr: *mut u8, + payload_len: u32, + ) -> u64 { let payload_len = payload_len as usize; let output = unsafe { let payload = Vec::from_raw_parts(payload_ptr, payload_len, payload_len); @@ -160,11 +170,8 @@ mod inner { pub fn spawn(entry_point: fn(Vec) -> Vec, payload: Vec) -> DataJoinHandle { let func_ptr: usize = unsafe { mem::transmute(entry_point) }; - let handle = sp_io::runtime_tasks::spawn( - dispatch_wrapper as usize as _, - func_ptr as u32, - payload, - ); + let handle = + sp_io::runtime_tasks::spawn(dispatch_wrapper as usize as _, func_ptr as u32, payload); DataJoinHandle { handle } } @@ -185,7 +192,7 @@ mod inner { } } -pub use inner::{DataJoinHandle, spawn}; +pub use inner::{spawn, DataJoinHandle}; #[cfg(test)] mod tests { @@ -211,7 +218,7 @@ mod tests { #[test] fn panicking() { - let res = sp_io::TestExternalities::default().execute_with_safe(||{ + let res = sp_io::TestExternalities::default().execute_with_safe(|| { spawn(async_panicker, vec![5, 2, 1]).join(); }); @@ -220,28 +227,30 @@ mod tests { #[test] fn many_joins() { - sp_io::TestExternalities::default().execute_with_safe(|| { - // converges to 1 only after 1000+ steps - let mut running_val = 9780657630u64; - let mut data = vec![]; - let handles = (0..1024).map( - |_| { - running_val = if running_val % 2 == 0 { - running_val / 2 - } else { - 3 * running_val + 1 - }; - data.push(running_val as u8); - (spawn(async_runner, data.clone()), data.clone()) + sp_io::TestExternalities::default() + .execute_with_safe(|| { + // converges to 1 only after 1000+ steps + let mut running_val = 9780657630u64; + let mut data = vec![]; + let handles = (0..1024) + .map(|_| { + running_val = if running_val % 2 == 0 { + running_val / 2 + } else { + 3 * running_val + 1 + }; + data.push(running_val as u8); + (spawn(async_runner, data.clone()), data.clone()) + }) + .collect::>(); + + for (handle, mut data) in handles { + let result = handle.join(); + data.sort(); + + assert_eq!(result, data); } - ).collect::>(); - - for (handle, mut data) in handles { - let result = handle.join(); - data.sort(); - - assert_eq!(result, data); - } - }).expect("Failed to run with externalities"); + }) + .expect("Failed to run with externalities"); } } diff --git a/substrate/primitives/test-primitives/src/lib.rs b/substrate/primitives/test-primitives/src/lib.rs index ed408f338e49a0737a1ec91e9dc05f8a2d308fd3..d988160b1dc7b922ea81d0dee375ba31572715e5 100644 --- a/substrate/primitives/test-primitives/src/lib.rs +++ b/substrate/primitives/test-primitives/src/lib.rs @@ -19,13 +19,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -use sp_application_crypto::sr25519; pub use sp_application_crypto; +use sp_application_crypto::sr25519; pub use sp_core::{hash::H256, RuntimeDebug}; -use sp_runtime::traits::{BlakeTwo256, Verify, Extrinsic as ExtrinsicT,}; +use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] @@ -37,7 +37,10 @@ pub enum Extrinsic { #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -80,8 +83,5 @@ pub type Header = sp_runtime::generic::Header; /// Changes trie configuration (optionally) used in tests. pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } + sp_core::ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 } } diff --git a/substrate/primitives/timestamp/src/lib.rs b/substrate/primitives/timestamp/src/lib.rs index 542522c9b85007ab7571a627abb6b180629e22a9..892d359d8e88913a33165ea13e097924cde125c8 100644 --- a/substrate/primitives/timestamp/src/lib.rs +++ b/substrate/primitives/timestamp/src/lib.rs @@ -19,8 +19,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; -use sp_inherents::{InherentIdentifier, IsFatalError, InherentData}; +use codec::{Decode, Encode}; +use sp_inherents::{InherentData, InherentIdentifier, IsFatalError}; use sp_std::time::Duration; /// The identifier for the `timestamp` inherent. @@ -190,10 +190,7 @@ impl InherentDataProvider { /// Create `Self` using the given `timestamp`. pub fn new(timestamp: InherentType) -> Self { - Self { - max_drift: std::time::Duration::from_secs(60).into(), - timestamp, - } + Self { max_drift: std::time::Duration::from_secs(60).into(), timestamp } } /// With the given maximum drift. @@ -249,9 +246,9 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { // halt import until timestamp is valid. // reject when too far ahead. if valid > timestamp + max_drift { - return Some(Err( - sp_inherents::Error::Application(Box::from(InherentError::TooFarInFuture)) - )) + return Some(Err(sp_inherents::Error::Application(Box::from( + InherentError::TooFarInFuture, + )))) } let diff = valid.checked_sub(timestamp).unwrap_or_default(); @@ -269,4 +266,3 @@ impl sp_inherents::InherentDataProvider for InherentDataProvider { } } } - diff --git a/substrate/primitives/tracing/src/lib.rs b/substrate/primitives/tracing/src/lib.rs index 95eb4d05667091f11e9d30198534be844fcd270c..9522e6df633ac38ba33565c1289fafe51ea5c9ff 100644 --- a/substrate/primitives/tracing/src/lib.rs +++ b/substrate/primitives/tracing/src/lib.rs @@ -40,18 +40,16 @@ #[cfg(feature = "std")] use tracing; pub use tracing::{ - debug, debug_span, error, error_span, event, info, info_span, Level, span, Span, - trace, trace_span, warn, warn_span, + debug, debug_span, error, error_span, event, info, info_span, span, trace, trace_span, warn, + warn_span, Level, Span, }; pub use crate::types::{ - WasmEntryAttributes, WasmFieldName, WasmFields, WasmLevel, WasmMetadata, WasmValue, - WasmValuesSet + WasmEntryAttributes, WasmFieldName, WasmFields, WasmLevel, WasmMetadata, WasmValue, + WasmValuesSet, }; #[cfg(feature = "std")] -pub use crate::types::{ - WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER -}; +pub use crate::types::{WASM_NAME_KEY, WASM_TARGET_KEY, WASM_TRACE_IDENTIFIER}; /// Tracing facilities and helpers. 
/// @@ -78,19 +76,18 @@ pub use crate::types::{ /// ```rust /// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "fn wide span"); /// { -/// sp_tracing::enter_span!(sp_tracing::trace_span!("outer-span")); -/// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); -/// // .. -/// } // inner span exists here -/// } // outer span exists here +/// sp_tracing::enter_span!(sp_tracing::trace_span!("outer-span")); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. +/// } // inner span exists here +/// } // outer span exists here /// /// sp_tracing::within_span! { -/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } // debug span ends here -/// /// ``` /// /// @@ -108,7 +105,6 @@ pub use crate::types::{ /// and call `set_tracing_subscriber` at the very beginning of your execution – /// the default subscriber is doing nothing, so any spans or events happening before /// will not be recorded! - mod types; /// Try to init a simple tracing subscriber with log compatibility layer. @@ -117,7 +113,8 @@ mod types; pub fn try_init_simple() { let _ = tracing_subscriber::fmt() .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) - .with_writer(std::io::stderr).try_init(); + .with_writer(std::io::stderr) + .try_init(); } /// Runs given code within a tracing span, measuring it's execution time. @@ -129,20 +126,20 @@ pub fn try_init_simple() { /// /// ``` /// sp_tracing::within_span! { -/// sp_tracing::Level::TRACE, +/// sp_tracing::Level::TRACE, /// "test-span"; /// 1 + 1; /// // some other complex code /// } /// /// sp_tracing::within_span! { -/// sp_tracing::span!(sp_tracing::Level::WARN, "warn-span", you_can_pass="any params"); +/// sp_tracing::span!(sp_tracing::Level::WARN, "warn-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } /// /// sp_tracing::within_span! { -/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); +/// sp_tracing::debug_span!("debug-span", you_can_pass="any params"); /// 1 + 1; /// // some other complex code /// } @@ -189,13 +186,12 @@ macro_rules! within_span { }; } - /// Enter a span - noop for `no_std` without `with-tracing` #[cfg(all(not(feature = "std"), not(feature = "with-tracing")))] #[macro_export] macro_rules! enter_span { - ( $lvl:expr, $name:expr ) => ( ); - ( $name:expr ) => ( ) // no-op + ( $lvl:expr, $name:expr ) => {}; + ( $name:expr ) => {}; // no-op } /// Enter a span. @@ -217,13 +213,12 @@ macro_rules! enter_span { /// sp_tracing::enter_span!(sp_tracing::info_span!("info-span", params="value")); /// /// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "outer-span"); -/// { -/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); -/// // .. -/// } // inner span exists here -/// } // outer span exists here -/// +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "outer-span"); +/// { +/// sp_tracing::enter_span!(sp_tracing::Level::TRACE, "inner-span"); +/// // .. 
+/// } // inner span exists here +/// } // outer span exists here /// ``` #[cfg(any(feature = "std", feature = "with-tracing"))] #[macro_export] diff --git a/substrate/primitives/tracing/src/types.rs b/substrate/primitives/tracing/src/types.rs index 9fdcdfb5263991e2d32dc45a3ded199e3e12b4a3..355e2fa451dbdb1c122556e907b0b74a66fb7923 100644 --- a/substrate/primitives/tracing/src/types.rs +++ b/substrate/primitives/tracing/src/types.rs @@ -15,15 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::{Decode, Encode}; /// Types for wasm based tracing. Loosly inspired by `tracing-core` but /// optimised for the specific use case. - -use core::{format_args, fmt::Debug}; -use sp_std::{ - vec, vec::Vec, -}; -use sp_std::Writer; -use codec::{Encode, Decode}; +use core::{fmt::Debug, format_args}; +use sp_std::{vec, vec::Vec, Writer}; /// The Tracing Level – the user can filter by this #[derive(Clone, Encode, Decode, Debug)] @@ -37,10 +33,9 @@ pub enum WasmLevel { /// Further information for debugging purposes DEBUG, /// The lowest level, keeping track of minute detail - TRACE + TRACE, } - impl From<&tracing_core::Level> for WasmLevel { fn from(l: &tracing_core::Level) -> WasmLevel { match *l { @@ -80,41 +75,27 @@ pub enum WasmValue { impl core::fmt::Debug for WasmValue { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { match self { - WasmValue::U8(ref i) => { - f.write_fmt(format_args!("{}_u8", i)) - } - WasmValue::I8(ref i) => { - f.write_fmt(format_args!("{}_i8", i)) - } - WasmValue::U32(ref i) => { - f.write_fmt(format_args!("{}_u32", i)) - } - WasmValue::I32(ref i) => { - f.write_fmt(format_args!("{}_i32", i)) - } - WasmValue::I64(ref i) => { - f.write_fmt(format_args!("{}_i64", i)) - } - WasmValue::U64(ref i) => { - f.write_fmt(format_args!("{}_u64", i)) - } - WasmValue::Bool(ref i) => { - f.write_fmt(format_args!("{}_bool", i)) - } + WasmValue::U8(ref i) => f.write_fmt(format_args!("{}_u8", i)), + WasmValue::I8(ref i) => f.write_fmt(format_args!("{}_i8", i)), + WasmValue::U32(ref i) => f.write_fmt(format_args!("{}_u32", i)), + WasmValue::I32(ref i) => f.write_fmt(format_args!("{}_i32", i)), + WasmValue::I64(ref i) => f.write_fmt(format_args!("{}_i64", i)), + WasmValue::U64(ref i) => f.write_fmt(format_args!("{}_u64", i)), + WasmValue::Bool(ref i) => f.write_fmt(format_args!("{}_bool", i)), WasmValue::Formatted(ref i) | WasmValue::Str(ref i) => { if let Ok(v) = core::str::from_utf8(i) { f.write_fmt(format_args!("{}", v)) } else { f.write_fmt(format_args!("{:?}", i)) } - } + }, WasmValue::Encoded(ref v) => { f.write_str("Scale(")?; - for byte in v { - f.write_fmt(format_args!("{:02x}", byte))?; - } + for byte in v { + f.write_fmt(format_args!("{:02x}", byte))?; + } f.write_str(")") - } + }, } } } @@ -297,7 +278,6 @@ impl core::fmt::Debug for WasmValuesSet { } } - impl From)>> for WasmValuesSet { fn from(v: Vec<(WasmFieldName, Option)>) -> Self { WasmValuesSet(v) @@ -324,34 +304,20 @@ impl WasmValuesSet { impl tracing_core::field::Visit for WasmValuesSet { fn record_debug(&mut self, field: &tracing_core::field::Field, value: &dyn Debug) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(format_args!("{:?}", value))) - )) + self.0 + .push((field.name().into(), Some(WasmValue::from(format_args!("{:?}", value))))) } fn record_i64(&mut self, field: &tracing_core::field::Field, value: i64) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + 
self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_u64(&mut self, field: &tracing_core::field::Field, value: u64) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_bool(&mut self, field: &tracing_core::field::Field, value: bool) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } fn record_str(&mut self, field: &tracing_core::field::Field, value: &str) { - self.0.push( ( - field.name().into(), - Some(WasmValue::from(value)) - )) + self.0.push((field.name().into(), Some(WasmValue::from(value)))) } } /// Metadata provides generic information about the specific location of the @@ -386,7 +352,7 @@ impl From<&tracing_core::Metadata<'_>> for WasmMetadata { line: wm.line().unwrap_or_default(), module_path: wm.module_path().map(|m| m.as_bytes().to_vec()).unwrap_or_default(), is_span: wm.is_span(), - fields: wm.fields().into() + fields: wm.fields().into(), } } } @@ -417,12 +383,11 @@ impl core::default::Default for WasmMetadata { line: Default::default(), module_path: Default::default(), is_span: true, - fields: WasmFields::empty() + fields: WasmFields::empty(), } } } - fn decode_field(field: &[u8]) -> &str { core::str::from_utf8(field).unwrap_or_default() } @@ -445,7 +410,7 @@ impl From<&tracing_core::Event<'_>> for WasmEntryAttributes { WasmEntryAttributes { parent_id: evt.parent().map(|id| id.into_u64()), metadata: evt.metadata().into(), - fields + fields, } } } @@ -457,7 +422,7 @@ impl From<&tracing_core::span::Attributes<'_>> for WasmEntryAttributes { WasmEntryAttributes { parent_id: attrs.parent().map(|id| id.into_u64()), metadata: attrs.metadata().into(), - fields + fields, } } } @@ -480,10 +445,14 @@ mod std_features { /// Static entry used for wasm-originated metadata. pub struct WasmCallsite; impl callsite::Callsite for WasmCallsite { - fn set_interest(&self, _: tracing_core::Interest) { unimplemented!() } - fn metadata(&self) -> &tracing_core::Metadata { unimplemented!() } + fn set_interest(&self, _: tracing_core::Interest) { + unimplemented!() + } + fn metadata(&self) -> &tracing_core::Metadata { + unimplemented!() + } } - static CALLSITE: WasmCallsite = WasmCallsite; + static CALLSITE: WasmCallsite = WasmCallsite; /// The identifier we are using to inject the wasm events in the generic `tracing` system pub static WASM_TRACE_IDENTIFIER: &str = "wasm_tracing"; /// The fieldname for the wasm-originated name @@ -491,8 +460,8 @@ mod std_features { /// The fieldname for the wasm-originated target pub static WASM_TARGET_KEY: &str = "target"; /// The list of all static field names we construct from the given metadata - pub static GENERIC_FIELDS: &[&str] = &[WASM_TARGET_KEY, WASM_NAME_KEY, - "file", "line", "module_path", "params"]; + pub static GENERIC_FIELDS: &[&str] = + &[WASM_TARGET_KEY, WASM_NAME_KEY, "file", "line", "module_path", "params"]; // Implementation Note: // the original `tracing` crate generates these static metadata entries at every `span!` and @@ -500,67 +469,147 @@ mod std_features { // of wasm events we need these static metadata entries to inject into that system. We then provide // generic `From`-implementations picking the right metadata to refer to.
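The static declarations below are long but mechanical, so the design behind them is easy to miss: because `tracing`'s usual per-callsite statics are unavailable for wasm-originated data, the module pre-builds one static `Metadata` value per (level, span/event) combination and then picks the right one by matching on the incoming wasm metadata. A minimal, dependency-free sketch of that dispatch pattern (all names here are illustrative stand-ins, not items from `sp-tracing` or `tracing-core`):

```rust
// Illustrative sketch only; not the crate's actual types.
#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum Level {
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}

struct Meta {
    level: Level,
    is_span: bool,
}

// One pre-built static per (level, kind) pair; the real module
// declares ten of these (five for spans, five for events).
static SPAN_ERROR_META: Meta = Meta { level: Level::Error, is_span: true };
static EVENT_ERROR_META: Meta = Meta { level: Level::Error, is_span: false };

// The real code performs this kind of match on `(&wm.level, wm.is_span)`
// inside a `From<&WasmMetadata>` impl and hands back a `&'static` reference.
fn select_metadata(level: Level, is_span: bool) -> &'static Meta {
    match (level, is_span) {
        (Level::Error, true) => &SPAN_ERROR_META,
        (Level::Error, false) => &EVENT_ERROR_META,
        _ => unimplemented!("remaining pairs elided in this sketch"),
    }
}

fn main() {
    assert!(select_metadata(Level::Error, true).is_span);
    println!("{:?}", select_metadata(Level::Error, false).level);
}
```

Pre-building the statics keeps every wasm span or event pointing at `'static` metadata, which is what the `tracing` machinery expects.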
- static SPAN_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_ERROR_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::ERROR, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_WARN_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::WARN, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_INFO_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::INFO, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_DEBUG_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::DEBUG, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static SPAN_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::SPAN + static SPAN_TRACE_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::TRACE, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::SPAN, ); - static EVENT_ERROR_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::ERROR, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - 
tracing_core::metadata::Kind::EVENT + static EVENT_ERROR_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::ERROR, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_WARN_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::WARN, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_WARN_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::WARN, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_INFO_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::INFO, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_INFO_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::INFO, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_DEBUG_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::DEBUG, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_DEBUG_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::DEBUG, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); - static EVENT_TRACE_METADATA : tracing_core::Metadata<'static> = tracing::Metadata::new( - WASM_TRACE_IDENTIFIER, WASM_TRACE_IDENTIFIER, tracing::Level::TRACE, None, None, None, - tracing_core::field::FieldSet::new(GENERIC_FIELDS, tracing_core::identify_callsite!(&CALLSITE)), - tracing_core::metadata::Kind::EVENT + static EVENT_TRACE_METADATA: tracing_core::Metadata<'static> = tracing::Metadata::new( + WASM_TRACE_IDENTIFIER, + WASM_TRACE_IDENTIFIER, + tracing::Level::TRACE, + None, + None, + None, + tracing_core::field::FieldSet::new( + GENERIC_FIELDS, + tracing_core::identify_callsite!(&CALLSITE), + ), + tracing_core::metadata::Kind::EVENT, ); // FIXME: this could be done a lot in 0.2 if they opt for using `Cow` instead - // https://github.com/paritytech/substrate/issues/7134 + // https://github.com/paritytech/substrate/issues/7134 impl From<&crate::WasmMetadata> for &'static tracing_core::Metadata<'static> { fn from(wm: &crate::WasmMetadata) -> &'static tracing_core::Metadata<'static> { match (&wm.level, wm.is_span) { @@ -586,12 +635,12 @@ mod std_features { let line = a.metadata.line; let module_path = std::str::from_utf8(&a.metadata.module_path).unwrap_or_default(); let params = a.fields; - let 
metadata : &tracing_core::metadata::Metadata<'static> = (&a.metadata).into(); + let metadata: &tracing_core::metadata::Metadata<'static> = (&a.metadata).into(); tracing::span::Span::child_of( a.parent_id.map(tracing_core::span::Id::from_u64), &metadata, - &tracing::valueset!{ metadata.fields(), target, name, file, line, module_path, ?params } + &tracing::valueset! { metadata.fields(), target, name, file, line, module_path, ?params }, ) } } @@ -605,12 +654,12 @@ mod std_features { let line = self.metadata.line; let module_path = std::str::from_utf8(&self.metadata.module_path).unwrap_or_default(); let params = self.fields; - let metadata : &tracing_core::metadata::Metadata<'static> = (&self.metadata).into(); + let metadata: &tracing_core::metadata::Metadata<'static> = (&self.metadata).into(); tracing_core::Event::child_of( self.parent_id.map(tracing_core::span::Id::from_u64), &metadata, - &tracing::valueset!{ metadata.fields(), target, name, file, line, module_path, ?params } + &tracing::valueset! { metadata.fields(), target, name, file, line, module_path, ?params }, ) } } diff --git a/substrate/primitives/transaction-pool/src/runtime_api.rs b/substrate/primitives/transaction-pool/src/runtime_api.rs index 42542d9f3c8b42f6c520b6d6bacd177517da9ef8..be631ee03b9d76ade2731baa161dc9e268eb9ff9 100644 --- a/substrate/primitives/transaction-pool/src/runtime_api.rs +++ b/substrate/primitives/transaction-pool/src/runtime_api.rs @@ -17,8 +17,10 @@ //! Tagged Transaction Queue Runtime API. -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{ + traits::Block as BlockT, + transaction_validity::{TransactionSource, TransactionValidity}, +}; sp_api::decl_runtime_apis! { /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. diff --git a/substrate/primitives/transaction-storage-proof/src/lib.rs b/substrate/primitives/transaction-storage-proof/src/lib.rs index 0deee8691ff8ec8d7388e2422a72fed27f1e8d7a..864d6d4084a83de199de39fc0eb6940c62e1fcde 100644 --- a/substrate/primitives/transaction-storage-proof/src/lib.rs +++ b/substrate/primitives/transaction-storage-proof/src/lib.rs @@ -20,11 +20,11 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result::Result, prelude::*}; +use sp_std::{prelude::*, result::Result}; -use codec::{Encode, Decode}; -use sp_inherents::{InherentIdentifier, InherentData, IsFatalError}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}}; +use codec::{Decode, Encode}; +use sp_inherents::{InherentData, InherentIdentifier, IsFatalError}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sp_inherents::Error; @@ -40,7 +40,7 @@ pub const CHUNK_SIZE: usize = 256; #[cfg_attr(feature = "std", derive(Decode))] pub enum InherentError { InvalidProof, - TrieError + TrieError, } impl IsFatalError for InherentError { @@ -130,26 +130,20 @@ pub trait IndexedBody { /// /// Note that this will only fetch transactions /// that are indexed by the runtime with `storage_index_transaction`. - fn block_indexed_body( - &self, - number: NumberFor, - ) -> Result>>, Error>; + fn block_indexed_body(&self, number: NumberFor) -> Result>>, Error>; /// Get block number for a block hash. 
- fn number( - &self, - hash: B::Hash, - ) -> Result>, Error>; + fn number(&self, hash: B::Hash) -> Result>, Error>; } #[cfg(feature = "std")] pub mod registration { - use sp_runtime::{traits::{Block as BlockT, Saturating, Zero, One}}; - use sp_trie::TrieMut; use super::*; + use sp_runtime::traits::{Block as BlockT, One, Saturating, Zero}; + use sp_trie::TrieMut; type Hasher = sp_core::Blake2Hasher; - type TrieLayout = sp_trie::Layout::; + type TrieLayout = sp_trie::Layout; /// Create a new inherent data provider instance for a given parent block hash. pub fn new_data_provider( @@ -166,25 +160,24 @@ pub mod registration { .saturating_sub(DEFAULT_STORAGE_PERIOD.into()); if number.is_zero() { // Too early to collect proofs. - return Ok(InherentDataProvider::new(None)); + return Ok(InherentDataProvider::new(None)) } let proof = match client.block_indexed_body(number)? { - Some(transactions) => { - Some(build_proof(parent.as_ref(), transactions)?) - }, + Some(transactions) => Some(build_proof(parent.as_ref(), transactions)?), None => { // Nothing was indexed in that block. None - } + }, }; Ok(InherentDataProvider::new(proof)) } /// Build a proof for a given source of randomness and indexed transactions. - pub fn build_proof(random_hash: &[u8], transactions: Vec>) - -> Result - { + pub fn build_proof( + random_hash: &[u8], + transactions: Vec>, + ) -> Result { let mut db = sp_trie::MemoryDB::::default(); let mut target_chunk = None; @@ -192,22 +185,25 @@ pub mod registration { let mut target_chunk_key = Default::default(); let mut chunk_proof = Default::default(); - let total_chunks: u64 = transactions.iter().map(|t| ((t.len() + CHUNK_SIZE - 1) / CHUNK_SIZE) as u64).sum(); + let total_chunks: u64 = transactions + .iter() + .map(|t| ((t.len() + CHUNK_SIZE - 1) / CHUNK_SIZE) as u64) + .sum(); let mut buf = [0u8; 8]; buf.copy_from_slice(&random_hash[0..8]); let random_u64 = u64::from_be_bytes(buf); let target_chunk_index = random_u64 % total_chunks; - //Generate tries for each transaction. + // Generate tries for each transaction. 
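The chunk bookkeeping in `build_proof` above is the subtle part: every transaction contributes `ceil(len / CHUNK_SIZE)` chunks, and the first eight bytes of the randomness select exactly one chunk across all transactions, which the loop that follows then proves. A self-contained sketch of just that selection arithmetic (plain Rust, no Substrate dependencies; `target_chunk_index` is an illustrative helper, not a function from this crate):

```rust
const CHUNK_SIZE: usize = 256;

/// Illustrative helper mirroring the arithmetic in `build_proof`:
/// picks the single chunk the storage proof must cover.
fn target_chunk_index(random_hash: &[u8], transactions: &[Vec<u8>]) -> u64 {
    // Every transaction contributes ceil(len / CHUNK_SIZE) chunks.
    let total_chunks: u64 = transactions
        .iter()
        .map(|t| ((t.len() + CHUNK_SIZE - 1) / CHUNK_SIZE) as u64)
        .sum();
    // The first eight bytes of the randomness select one chunk uniformly;
    // the caller guarantees upstream that at least one chunk exists.
    let mut buf = [0u8; 8];
    buf.copy_from_slice(&random_hash[0..8]);
    u64::from_be_bytes(buf) % total_chunks
}

fn main() {
    // 300 bytes -> 2 chunks, 256 -> 1 chunk, 1 -> 1 chunk: 4 in total.
    let txs = vec![vec![0u8; 300], vec![0u8; 256], vec![0u8; 1]];
    let index = target_chunk_index(&[7u8; 32], &txs);
    assert!(index < 4);
    println!("target chunk index: {}", index);
}
```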
let mut chunk_index = 0; for transaction in transactions { let mut transaction_root = sp_trie::empty_trie_root::(); { - let mut trie = sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); + let mut trie = + sp_trie::TrieDBMut::::new(&mut db, &mut transaction_root); let chunks = transaction.chunks(CHUNK_SIZE).map(|c| c.to_vec()); for (index, chunk) in chunks.enumerate() { let index = encode_index(index as u32); - trie.insert(&index, &chunk) - .map_err(|e| Error::Application(Box::new(e)))?; + trie.insert(&index, &chunk).map_err(|e| Error::Application(Box::new(e)))?; if chunk_index == target_chunk_index { target_chunk = Some(chunk); target_chunk_key = index; @@ -221,15 +217,13 @@ pub mod registration { chunk_proof = sp_trie::generate_trie_proof::( &db, transaction_root.clone(), - &[target_chunk_key.clone()] - ).map_err(|e| Error::Application(Box::new(e)))?; + &[target_chunk_key.clone()], + ) + .map_err(|e| Error::Application(Box::new(e)))?; } - }; + } - Ok(TransactionStorageProof { - proof: chunk_proof, - chunk: target_chunk.unwrap(), - }) + Ok(TransactionStorageProof { proof: chunk_proof, chunk: target_chunk.unwrap() }) } #[test] @@ -237,11 +231,15 @@ pub mod registration { use std::str::FromStr; let random = [0u8; 32]; let proof = build_proof(&random, vec![vec![42]]).unwrap(); - let root = sp_core::H256::from_str("0xff8611a4d212fc161dae19dd57f0f1ba9309f45d6207da13f2d3eab4c6839e91").unwrap(); + let root = sp_core::H256::from_str( + "0xff8611a4d212fc161dae19dd57f0f1ba9309f45d6207da13f2d3eab4c6839e91", + ) + .unwrap(); sp_trie::verify_trie_proof::( &root, &proof.proof, &[(encode_index(0), Some(proof.chunk))], - ).unwrap(); + ) + .unwrap(); } } diff --git a/substrate/primitives/trie/benches/bench.rs b/substrate/primitives/trie/benches/bench.rs index c2ccb31328aaea6201f5d6008cfa617207bad4eb..8c84c6354f2c3071d0ffdeeceb47dfd0ea8a79a8 100644 --- a/substrate/primitives/trie/benches/bench.rs +++ b/substrate/primitives/trie/benches/bench.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use criterion::{Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, Criterion}; criterion_group!(benches, benchmark); criterion_main!(benches); diff --git a/substrate/primitives/trie/src/error.rs b/substrate/primitives/trie/src/error.rs index bdaa49b1156f70756f5ddd3a5469437fc67c76ea..30a164c614755dc60f23cb3e09d35e4ae7831070 100644 --- a/substrate/primitives/trie/src/error.rs +++ b/substrate/primitives/trie/src/error.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#[cfg(feature="std")] -use std::fmt; -#[cfg(feature="std")] +#[cfg(feature = "std")] use std::error::Error as StdError; +#[cfg(feature = "std")] +use std::fmt; #[derive(Debug, PartialEq, Eq, Clone)] /// Error for trie node decoding. 
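The `error.rs` hunks here and just below only regroup the `std`-gated imports, but they rest on a pattern worth spelling out: the error type itself compiles under `no_std`, while the `fmt::Display` and `std::error::Error` impls exist only when the `std` feature is enabled. A minimal sketch of that layout (`MyError` is an illustrative stand-in, not the crate's actual enum):

```rust
// The enum itself carries no `std` dependency, so it builds in wasm.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum MyError {
    /// Bad format in the encoded node.
    BadFormat,
    /// Decoding failed.
    Decode,
}

// Host-side niceties are compiled in only with the `std` feature.
#[cfg(feature = "std")]
impl std::fmt::Display for MyError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            MyError::BadFormat => write!(f, "Bad format error"),
            MyError::Decode => write!(f, "Decoding error"),
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for MyError {}
```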
@@ -35,7 +35,7 @@ impl From for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl StdError for Error { fn description(&self) -> &str { match self { @@ -45,7 +45,7 @@ impl StdError for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { diff --git a/substrate/primitives/trie/src/lib.rs b/substrate/primitives/trie/src/lib.rs index a496245637a523285d754395a6cca085384da06f..8ba13284d379f3423f1e00cd88b923de0d888cc3 100644 --- a/substrate/primitives/trie/src/lib.rs +++ b/substrate/primitives/trie/src/lib.rs @@ -20,35 +20,36 @@ #![cfg_attr(not(feature = "std"), no_std)] mod error; -mod node_header; mod node_codec; +mod node_header; mod storage_proof; mod trie_codec; mod trie_stream; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow}; -use hash_db::{Hasher, Prefix}; -use trie_db::proof::{generate_proof, verify_proof}; -pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; -/// The Substrate format implementation of `TrieStream`. -pub use trie_stream::TrieStream; -/// The Substrate format implementation of `NodeCodec`. -pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, CompactProof}; -/// Various re-exports from the `trie-db` crate. -pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, -}; -/// Various re-exports from the `memory-db` crate. -pub use memory_db::KeyFunction; -pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +use hash_db::{Hasher, Prefix}; +pub use memory_db::prefixed_key; +/// Various re-exports from the `memory-db` crate. +pub use memory_db::KeyFunction; +/// The Substrate format implementation of `NodeCodec`. +pub use node_codec::NodeCodec; +use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; +pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; +pub use trie_db::proof::VerifyError; +use trie_db::proof::{generate_proof, verify_proof}; +/// Various re-exports from the `trie-db` crate. +pub use trie_db::{ + nibble_ops, CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, + TrieLayout, TrieMut, +}; +/// The Substrate format implementation of `TrieStream`. +pub use trie_stream::TrieStream; #[derive(Default)] /// substrate trie layout @@ -62,7 +63,8 @@ impl TrieLayout for Layout { } impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where + fn trie_root(input: I) -> ::Out + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -70,7 +72,8 @@ impl TrieConfiguration for Layout { trie_root::trie_root_no_extension::(input) } - fn trie_root_unhashed(input: I) -> Vec where + fn trie_root_unhashed(input: I) -> Vec + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -98,19 +101,14 @@ pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). 
-pub type PrefixedMemoryDB = memory_db::MemoryDB< - H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker ->; +pub type PrefixedMemoryDB = + memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a no-op `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoids key conflicts). -pub type MemoryDB = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, MemTracker, ->; +pub type MemoryDB = memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MemTracker ->; +pub type GenericMemoryDB = memory_db::MemoryDB; /// Persistent trie database read-access interface for a given hasher. pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; @@ -147,8 +145,9 @@ pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( db: &DB, root: TrieHash, keys: I, -) -> Result>, Box>> where - I: IntoIterator, +) -> Result>, Box>> +where + I: IntoIterator, K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { @@ -168,8 +167,9 @@ pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( root: &TrieHash, proof: &[Vec], items: I, -) -> Result<(), VerifyError, error::Error>> where - I: IntoIterator)>, +) -> Result<(), VerifyError, error::Error>> +where + I: IntoIterator)>, K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { @@ -180,8 +180,9 @@ pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, - delta: I -) -> Result, Box>> where + delta: I, +) -> Result, Box>> +where I: IntoIterator, A: Borrow<[u8]>, B: Borrow>, @@ -209,7 +210,7 @@ pub fn delta_trie_root( pub fn read_trie_value>( db: &DB, root: &TrieHash, - key: &[u8] + key: &[u8], ) -> Result>, Box>> { TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec())) } @@ -217,15 +218,17 @@ pub fn read_trie_value, - DB: hash_db::HashDBRef + Q: Query, + DB: hash_db::HashDBRef, >( db: &DB, root: &TrieHash, key: &[u8], - query: Q + query: Q, ) -> Result>, Box>> { - TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec())) + TrieDB::::new(&*db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec())) } /// Determine the empty trie root. @@ -240,13 +243,11 @@ pub fn empty_child_trie_root() -> ::Out /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +pub fn child_trie_root(input: I) -> ::Out +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { L::trie_root(input) } @@ -259,33 +260,30 @@ pub fn child_delta_trie_root( root_data: RD, delta: I, ) -> Result<::Out, Box>> - where - I: IntoIterator, - A: Borrow<[u8]>, - B: Borrow>, - V: Borrow<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB +where + I: IntoIterator, + A: Borrow<[u8]>, + B: Borrow>, + V: Borrow<[u8]>, + RD: AsRef<[u8]>, + DB: hash_db::HashDB, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::( - &mut db, - root, - delta, - ) + delta_trie_root::(&mut db, root, delta) } /// Record all keys for a given root.
pub fn record_all_keys( db: &DB, root: &TrieHash, - recorder: &mut Recorder> -) -> Result<(), Box>> where - DB: hash_db::HashDBRef + recorder: &mut Recorder>, +) -> Result<(), Box>> +where + DB: hash_db::HashDBRef, { let trie = TrieDB::::new(&*db, root)?; let iter = trie.iter()?; @@ -307,10 +305,10 @@ pub fn read_child_trie_value( keyspace: &[u8], db: &DB, root_slice: &[u8], - key: &[u8] + key: &[u8], ) -> Result>, Box>> - where - DB: hash_db::HashDBRef +where + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -321,22 +319,24 @@ pub fn read_child_trie_value( } /// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( +pub fn read_child_trie_value_with, DB>( keyspace: &[u8], db: &DB, root_slice: &[u8], key: &[u8], - query: Q + query: Q, ) -> Result>, Box>> - where - DB: hash_db::HashDBRef +where + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_slice); let db = KeySpacedDB::new(&*db, keyspace); - TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec())) + TrieDB::::new(&db, &root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec())) } /// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the @@ -358,7 +358,8 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) (result, prefix.1) } -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where +impl<'a, DB, H> KeySpacedDB<'a, DB, H> +where H: Hasher, { /// instantiate new keyspaced db @@ -367,7 +368,8 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where } } -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> +where H: Hasher, { /// instantiate new keyspaced db @@ -376,7 +378,8 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> +where DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, @@ -392,7 +395,8 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> +where DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, @@ -423,12 +427,15 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> +where DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { + &*self + } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { &mut *self @@ -447,12 +454,12 @@ mod trie_constants { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode, Compact}; - use sp_core::Blake2Hasher; + use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; - use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; - use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; + use sp_core::Blake2Hasher; + use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; + use 
trie_standardmap::{Alphabet, StandardMap, ValueMode}; type Layout = super::Layout; @@ -491,7 +498,8 @@ mod tests { let t = TrieDB::::new(&mut memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), - t.iter().unwrap() + t.iter() + .unwrap() .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) .collect::>() ); @@ -505,9 +513,11 @@ mod tests { let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::trie_root::<_, Vec, Vec>( - std::iter::empty(), - ).as_ref().iter().cloned().collect(); + let root2: Vec = Layout::trie_root::<_, Vec, Vec>(std::iter::empty()) + .as_ref() + .iter() + .cloned() + .collect(); assert_eq!(root1, root2); } @@ -528,20 +538,16 @@ mod tests { #[test] fn branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xba][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; check_equivalent::(&input); check_iteration::(&input); } #[test] fn extension_and_branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xab][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; check_equivalent::(&input); check_iteration::(&input); } @@ -567,7 +573,7 @@ mod tests { let input: Vec<(&[u8], &[u8])> = vec![ (&[0xaa][..], &[0xa0][..]), (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]) + (&[0xaa, 0xbb][..], &[0xab][..]), ]; check_equivalent::(&input); check_iteration::(&input); @@ -590,7 +596,10 @@ mod tests { #[test] fn single_long_leaf_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), (&[0xba][..], &[0x11][..]), ]; check_equivalent::(&input); @@ -600,8 +609,14 @@ mod tests { #[test] fn two_long_leaves_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), - (&[0xba][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]) + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ( + &[0xba][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), ]; check_equivalent::(&input); check_iteration::(&input); @@ -610,11 +625,11 @@ mod tests { fn populate_trie<'db, T: TrieConfiguration>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, - v: &[(Vec, Vec)] + v: &[(Vec, Vec)], ) -> TrieDBMut<'db, T> { let mut t = TrieDBMut::::new(db, root); for i in 0..v.len() { - let key: &[u8]= &v[i].0; + let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; t.insert(key, val).unwrap(); } @@ -626,7 +641,7 @@ mod tests { v: &[(Vec, Vec)], ) { for i in v { - let key: &[u8]= &i.0; + let key: &[u8] = &i.0; t.remove(key).unwrap(); } } @@ -644,7 +659,8 @@ mod tests { journal_key: 0, value_mode: ValueMode::Index, count: 100, - }.make_with(seed.as_fixed_bytes_mut()); + } + .make_with(seed.as_fixed_bytes_mut()); let real = Layout::trie_root(x.clone()); let mut memdb = MemoryDB::default(); @@ -690,17 +706,18 @@ mod tests { #[test] fn codec_trie_single_tuple() { - let input = vec![ - (vec![0xaa], vec![0xbb]) - ]; + let input = 
vec![(vec![0xaa], vec![0xbb])]; let trie = Layout::trie_root_unhashed::<_, _, _>(input); println!("trie: {:#x?}", trie); - assert_eq!(trie, vec![ - 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) - 0xaa, // key data - to_compact(1), // length of value in bytes as Compact - 0xbb // value data - ]); + assert_eq!( + trie, + vec![ + 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) + 0xaa, // key data + to_compact(1), // length of value in bytes as Compact + 0xbb // value data + ] + ); } #[test] @@ -709,21 +726,21 @@ mod tests { let trie = Layout::trie_root_unhashed::<_, _, _>(input); println!("trie: {:#x?}", trie); let mut ex = Vec::::new(); - ex.push(0x80); // branch, no value (0b_10..) no nibble - ex.push(0x12); // slots 1 & 4 are taken from 0-7 - ex.push(0x00); // no slots from 8-15 - ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. - ex.push(0x43); // leaf 0x40 with 3 nibbles - ex.push(0x03); // first nibble - ex.push(0x14); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xff); // value data - ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. - ex.push(0x43); // leaf with 3 nibbles - ex.push(0x08); // first nibble - ex.push(0x19); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xfe); // value data + ex.push(0x80); // branch, no value (0b_10..) no nibble + ex.push(0x12); // slots 1 & 4 are taken from 0-7 + ex.push(0x00); // no slots from 8-15 + ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. + ex.push(0x43); // leaf 0x40 with 3 nibbles + ex.push(0x03); // first nibble + ex.push(0x14); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xff); // value data + ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. + ex.push(0x43); // leaf with 3 nibbles + ex.push(0x08); // first nibble + ex.push(0x19); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xfe); // value data assert_eq!(trie, ex); } @@ -763,27 +780,25 @@ mod tests { populate_trie::(&mut memdb, &mut root, &pairs); let non_included_key: Vec = hex!("0909").to_vec(); - let proof = generate_trie_proof::( - &memdb, - root, - &[non_included_key.clone()] - ).unwrap(); + let proof = + generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) + .unwrap(); // Verifying that the K was not included into the trie should work. assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key.clone(), None)], - ).is_ok() - ); + &root, + &proof, + &[(non_included_key.clone(), None)], + ) + .is_ok()); // Verifying that the K was included into the trie should fail. assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key, Some(hex!("1010").to_vec()))], - ).is_err() - ); + &root, + &proof, + &[(non_included_key, Some(hex!("1010").to_vec()))], + ) + .is_err()); } #[test] @@ -797,71 +812,71 @@ mod tests { let mut root = Default::default(); populate_trie::(&mut memdb, &mut root, &pairs); - let proof = generate_trie_proof::( - &memdb, - root, - &[pairs[0].0.clone()] - ).unwrap(); + let proof = + generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); // Check that a K, V included into the proof are verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] - ).is_ok() - ); + &root, + &proof, + &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] + ) + .is_ok()); // Absence of the V is not verified with the proof that has K, V included. 
assert!(verify_trie_proof::>( - &root, - &proof, - &[(pairs[0].0.clone(), None)] - ).is_err() - ); + &root, + &proof, + &[(pairs[0].0.clone(), None)] + ) + .is_err()); // K not included into the trie is not verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] - ).is_err() - ); + &root, + &proof, + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + ) + .is_err()); // K included into the trie but not included into the proof is not verified. assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] - ).is_err() - ); + &root, + &proof, + &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] + ) + .is_err()); } #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); - let storage_root = sp_core::H256::decode( - &mut &include_bytes!("../test-res/storage_root")[..], - ).unwrap(); + let storage_root = + sp_core::H256::decode(&mut &include_bytes!("../test-res/storage_root")[..]).unwrap(); // Delta order that is "invalid" so that it would require a different proof. let invalid_delta = Vec::<(Vec, Option>)>::decode( &mut &include_bytes!("../test-res/invalid-delta-order")[..], - ).unwrap(); + ) + .unwrap(); // Delta order that is "valid" let valid_delta = Vec::<(Vec, Option>)>::decode( &mut &include_bytes!("../test-res/valid-delta-order")[..], - ).unwrap(); + ) + .unwrap(); let proof_db = proof.into_memory_db::(); let first_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, valid_delta, - ).unwrap(); + ) + .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, - ).unwrap(); + ) + .unwrap(); assert_eq!(first_storage_root, second_storage_root); } diff --git a/substrate/primitives/trie/src/node_codec.rs b/substrate/primitives/trie/src/node_codec.rs index 296f03972c795e827486ac0b408f34cbb282cb07..d5ffb3219cf68aba071ff40a22dcdd36fc93ed17 100644 --- a/substrate/primitives/trie/src/node_codec.rs +++ b/substrate/primitives/trie/src/node_codec.rs @@ -17,17 +17,16 @@ //! `NodeCodec` implementation for Substrate's trie format. -use sp_std::marker::PhantomData; -use sp_std::ops::Range; -use sp_std::vec::Vec; -use sp_std::borrow::Borrow; -use codec::{Encode, Decode, Input, Compact}; +use super::node_header::{NodeHeader, NodeKind}; +use crate::{error::Error, trie_constants}; +use codec::{Compact, Decode, Encode, Input}; use hash_db::Hasher; -use trie_db::{self, node::{NibbleSlicePlan, NodePlan, NodeHandlePlan}, ChildReference, - nibble_ops, Partial, NodeCodec as NodeCodecT}; -use crate::error::Error; -use crate::trie_constants; -use super::{node_header::{NodeHeader, NodeKind}}; +use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; +use trie_db::{ + self, nibble_ops, + node::{NibbleSlicePlan, NodeHandlePlan, NodePlan}, + ChildReference, NodeCodec as NodeCodecT, Partial, +}; /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while /// tracking the absolute position. 
This is similar to `std::io::Cursor` but does not implement @@ -39,15 +38,12 @@ struct ByteSliceInput<'a> { impl<'a> ByteSliceInput<'a> { fn new(data: &'a [u8]) -> Self { - ByteSliceInput { - data, - offset: 0, - } + ByteSliceInput { data, offset: 0 } } fn take(&mut self, count: usize) -> Result, codec::Error> { if self.offset + count > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let range = self.offset..(self.offset + count); @@ -58,11 +54,8 @@ impl<'a> ByteSliceInput<'a> { impl<'a> Input for ByteSliceInput<'a> { fn remaining_len(&mut self) -> Result, codec::Error> { - let remaining = if self.offset <= self.data.len() { - Some(self.data.len() - self.offset) - } else { - None - }; + let remaining = + if self.offset <= self.data.len() { Some(self.data.len() - self.offset) } else { None }; Ok(remaining) } @@ -74,7 +67,7 @@ impl<'a> Input for ByteSliceInput<'a> { fn read_byte(&mut self) -> Result { if self.offset + 1 > self.data.len() { - return Err("out of data".into()); + return Err("out of data".into()) } let byte = self.data[self.offset]; @@ -103,10 +96,11 @@ impl NodeCodecT for NodeCodec { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; @@ -118,8 +112,8 @@ impl NodeCodecT for NodeCodec { None }; let mut children = [ - None, None, None, None, None, None, None, None, - None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, ]; for i in 0..nibble_ops::NIBBLE_LENGTH { if bitmap.value_at(i) { @@ -137,15 +131,16 @@ impl NodeCodecT for NodeCodec { value, children, }) - } + }, NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); + return Err(Error::BadFormat) } let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / + nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); let count = >::decode(&mut input)?.0 as usize; @@ -153,7 +148,7 @@ impl NodeCodecT for NodeCodec { partial: NibbleSlicePlan::new(partial, partial_padding), value: input.take(count)?, }) - } + }, } } @@ -199,26 +194,28 @@ impl NodeCodecT for NodeCodec { }; let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; - (0..BITMAP_LENGTH).for_each(|_|output.push(0)); + (0..BITMAP_LENGTH).for_each(|_| output.push(0)); if let Some(value) = maybe_value { value.encode_to(&mut output); }; - Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { - Some(ChildReference::Hash(h)) => { - h.as_ref().encode_to(&mut output); - true - } - &Some(ChildReference::Inline(inline_data, len)) => { - inline_data.as_ref()[..len].encode_to(&mut output); - true - } - None => false, - }), bitmap.as_mut()); + Bitmap::encode( + children.map(|maybe_child| match 
maybe_child.borrow() { + Some(ChildReference::Hash(h)) => { + h.as_ref().encode_to(&mut output); + true + }, + &Some(ChildReference::Inline(inline_data, len)) => { + inline_data.as_ref()[..len].encode_to(&mut output); + true + }, + None => false, + }), + bitmap.as_mut(), + ); output[bitmap_index..bitmap_index + BITMAP_LENGTH] .copy_from_slice(&bitmap[..BITMAP_LENGTH]); output } - } // utils @@ -280,11 +277,13 @@ impl Bitmap { self.0 & (1u16 << i) != 0 } - pub fn encode>(has_children: I , dest: &mut [u8]) { + pub fn encode>(has_children: I, dest: &mut [u8]) { let mut bitmap: u16 = 0; let mut cursor: u16 = 1; for v in has_children { - if v { bitmap |= cursor } + if v { + bitmap |= cursor + } cursor <<= 1; } dest[0] = (bitmap % 256) as u8; diff --git a/substrate/primitives/trie/src/node_header.rs b/substrate/primitives/trie/src/node_header.rs index 0fdf6fefbd0bcc1a64cbc92f1cbd3683c70f5906..9f05113a359351d7595bf1d670a5c068af8eb7cf 100644 --- a/substrate/primitives/trie/src/node_header.rs +++ b/substrate/primitives/trie/src/node_header.rs @@ -18,12 +18,11 @@ //! The node header. use crate::trie_constants; -use codec::{Encode, Decode, Input, Output}; +use codec::{Decode, Encode, Input, Output}; use sp_std::iter::once; /// A node header -#[derive(Copy, Clone, PartialEq, Eq)] -#[derive(sp_core::RuntimeDebug)] +#[derive(Copy, Clone, PartialEq, Eq, sp_core::RuntimeDebug)] pub(crate) enum NodeHeader { Null, Branch(bool, usize), @@ -41,7 +40,7 @@ impl Encode for NodeHeader { fn encode_to(&self, output: &mut T) { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => + NodeHeader::Branch(true, nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output), NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), @@ -57,12 +56,14 @@ impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { let i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { - return Ok(NodeHeader::Null); + return Ok(NodeHeader::Null) } match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), - trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input)?)), - trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input)?)), + trie_constants::BRANCH_WITHOUT_MASK => + Ok(NodeHeader::Branch(false, decode_size(i, input)?)), + trie_constants::BRANCH_WITH_MASK => + Ok(NodeHeader::Branch(true, decode_size(i, input)?)), // do not allow any special encoding _ => Err("Unallowed encoding".into()), } @@ -76,11 +77,8 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); let l1 = sp_std::cmp::min(62, size); - let (first_byte, mut rem) = if size == l1 { - (once(prefix + l1 as u8), 0) - } else { - (once(prefix + 63), size - l1) - }; + let (first_byte, mut rem) = + if size == l1 { (once(prefix + l1 as u8), 0) } else { (once(prefix + 63), size - l1) }; let next_bytes = move || { if rem > 0 { if rem < 256 { @@ -109,13 +107,13 @@ fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut fn decode_size(first: u8, input: &mut impl Input) -> Result { let mut result = (first & 255u8 >> 2) as usize; if result < 63 { - return Ok(result); + return Ok(result) } result -= 1; while result <= trie_constants::NIBBLE_SIZE_BOUND { let n = input.read_byte()? 
as usize; if n < 255 { - return Ok(result + n + 1); + return Ok(result + n + 1) } result += 255; } diff --git a/substrate/primitives/trie/src/storage_proof.rs b/substrate/primitives/trie/src/storage_proof.rs index 03668920509b8110beeb6e852ea81f32c1b102f6..b4e4b393a71abc377e5f995ed64c9a613981aae4 100644 --- a/substrate/primitives/trie/src/storage_proof.rs +++ b/substrate/primitives/trie/src/storage_proof.rs @@ -15,9 +15,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher}; use sp_std::vec::Vec; -use codec::{Encode, Decode}; -use hash_db::{Hasher, HashDB}; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -48,9 +48,7 @@ impl StorageProof { /// An empty proof is capable of only proving trivial statements (i.e. that an empty set of /// key-value pairs exists in storage). pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), - } + StorageProof { trie_nodes: Vec::new() } } /// Returns whether this is an empty proof. @@ -76,8 +74,12 @@ impl StorageProof { /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. - pub fn merge(proofs: I) -> Self where I: IntoIterator { - let trie_nodes = proofs.into_iter() + pub fn merge(proofs: I) -> Self + where + I: IntoIterator, + { + let trie_nodes = proofs + .into_iter() .flat_map(|proof| proof.iter_nodes()) .collect::>() .into_iter() @@ -94,7 +96,7 @@ impl StorageProof { ) -> Result>> { crate::encode_compact::>(self, root) } - + /// Returns the estimated encoded size of the compact proof. /// /// Running this operation is slow (it builds the whole compact proof) and should only be @@ -104,7 +106,6 @@ impl StorageProof { let compact_proof = self.into_compact_proof::(root); compact_proof.ok().map(|p| p.encoded_size()) } - } impl CompactProof { @@ -127,13 +128,15 @@ impl CompactProof { self.iter_compact_encoded_nodes(), expected_root, )?; - Ok((StorageProof::new(db.drain().into_iter().filter_map(|kv| - if (kv.1).1 > 0 { - Some((kv.1).0) - } else { - None - } - ).collect()), root)) + Ok(( + StorageProof::new( + db.drain() + .into_iter() + .filter_map(|kv| if (kv.1).1 > 0 { Some((kv.1).0) } else { None }) + .collect(), + ), + root, + )) } } @@ -145,9 +148,7 @@ pub struct StorageProofNodeIterator { impl StorageProofNodeIterator { fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), - } + StorageProofNodeIterator { inner: proof.trie_nodes.into_iter() } } } diff --git a/substrate/primitives/trie/src/trie_codec.rs b/substrate/primitives/trie/src/trie_codec.rs index efe3223580f3fb4b675ccc1a2d7f49f07f4baba0..ed5724e0455d16cee2a22803d78273cc0f35bbd8 100644 --- a/substrate/primitives/trie/src/trie_codec.rs +++ b/substrate/primitives/trie/src/trie_codec.rs @@ -21,17 +21,14 @@ //! it to substrate specific layout and child trie system.
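For reference, the variable-length size encoding that `decode_size` in the node-header hunk above reverses works like this: nibble counts up to 62 are stored inline in the low six bits of the header byte; the sentinel value 63 announces continuation bytes, where each `0xff` byte adds 255 and the first byte below `0xff` terminates the sequence, adding `n + 1`. A self-contained round trip under those rules (a sketch: it omits the `NIBBLE_SIZE_BOUND` clamp the real code applies, and `encode_size`/`decode_size` here are re-implementations, not the crate's items):

```rust
/// Sketch re-implementation of the node-header size encoding.
fn encode_size(size: usize, prefix: u8, out: &mut Vec<u8>) {
    let l1 = size.min(62);
    if size == l1 {
        // Small sizes fit into the low six bits of the header byte.
        out.push(prefix + l1 as u8);
        return
    }
    out.push(prefix + 63); // 63 = "continuation bytes follow"
    let mut rem = size - l1;
    while rem >= 256 {
        out.push(255); // each 0xff contributes 255
        rem -= 255;
    }
    out.push((rem - 1) as u8); // terminator contributes n + 1
}

fn decode_size(input: &[u8]) -> usize {
    let mut result = (input[0] & 0b0011_1111) as usize;
    if result < 63 {
        return result
    }
    result -= 1; // 62 so far; continuation bytes add the rest
    for &n in &input[1..] {
        if n < 255 {
            return result + n as usize + 1
        }
        result += 255;
    }
    unreachable!("a well-formed encoding ends with a byte below 255")
}

fn main() {
    for size in [0, 1, 62, 63, 64, 317, 318, 1000, 65536] {
        let mut buf = Vec::new();
        encode_size(size, 0, &mut buf);
        assert_eq!(decode_size(&buf), size, "round trip failed for {}", size);
    }
    println!("all sizes round-tripped");
}
```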
use crate::{ - EMPTY_PREFIX, HashDBT, TrieHash, TrieError, TrieConfiguration, - CompactProof, StorageProof, + CompactProof, HashDBT, StorageProof, TrieConfiguration, TrieError, TrieHash, EMPTY_PREFIX, }; -use sp_std::boxed::Box; -use sp_std::vec::Vec; -use trie_db::Trie; -#[cfg(feature="std")] -use std::fmt; -#[cfg(feature="std")] +use sp_std::{boxed::Box, vec::Vec}; +#[cfg(feature = "std")] use std::error::Error as StdError; - +#[cfg(feature = "std")] +use std::fmt; +use trie_db::Trie; /// Error for trie node decoding. pub enum Error { @@ -55,7 +52,7 @@ impl From>> for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl StdError for Error { fn description(&self) -> &str { match self { @@ -69,14 +66,14 @@ impl StdError for Error { } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ::fmt(&self, f) } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { @@ -84,7 +81,8 @@ impl fmt::Display for Error { Error::TrieError(e) => write!(f, "Trie error: {}", e), Error::IncompleteProof => write!(f, "Incomplete proof"), Error::ExtraneousChildNode => write!(f, "Child node content with no root in proof"), - Error::ExtraneousChildProof(root) => write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()), + Error::ExtraneousChildProof(root) => + write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()), Error::RootMismatch(root, expected) => write!( f, "Verification error, root is {:x?}, expected: {:x?}", @@ -107,21 +105,19 @@ pub fn decode_compact<'a, L, DB, I>( encoded: I, expected_root: Option<&TrieHash>, ) -> Result, Error> - where - L: TrieConfiguration, - DB: HashDBT + hash_db::HashDBRef, - I: IntoIterator, +where + L: TrieConfiguration, + DB: HashDBT + hash_db::HashDBRef, + I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); - let (top_root, _nb_used) = trie_db::decode_compact_from_iter::( - db, - &mut nodes_iter, - )?; + let (top_root, _nb_used) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { if expected_root != &top_root { - return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())); + return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())) } } @@ -142,7 +138,7 @@ pub fn decode_compact<'a, L, DB, I>( let mut root = TrieHash::::default(); // still in a proof so prevent panic if root.as_mut().len() != value.as_slice().len() { - return Err(Error::InvalidChildRoot(key, value)); + return Err(Error::InvalidChildRoot(key, value)) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -160,16 +156,14 @@ pub fn decode_compact<'a, L, DB, I>( } if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { - return Err(Error::IncompleteProof); + return Err(Error::IncompleteProof) } let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() { - let (top_root, _) = trie_db::decode_compact_from_iter::( - db, - &mut nodes_iter, - )?; + let (top_root, _) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } @@ -184,11 +178,11 @@ pub fn decode_compact<'a, L, DB, I>( if let Some(child_root) = previous_extracted_child_trie { // A child root was read from proof but is not present // in top trie. 
- return Err(Error::ExtraneousChildProof(child_root)); + return Err(Error::ExtraneousChildProof(child_root)) } if nodes_iter.next().is_some() { - return Err(Error::ExtraneousChildNode); + return Err(Error::ExtraneousChildNode) } Ok(top_root) @@ -201,12 +195,9 @@ pub fn decode_compact<'a, L, DB, I>( /// Then parse all child trie roots and compress main trie content first /// then all child trie contents. /// Child tries are ordered by the order of their roots in the top trie. -pub fn encode_compact( - proof: StorageProof, - root: TrieHash, -) -> Result> - where - L: TrieConfiguration, +pub fn encode_compact(proof: StorageProof, root: TrieHash) -> Result> +where + L: TrieConfiguration, { let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); @@ -223,7 +214,7 @@ pub fn encode_compact( let mut root = TrieHash::::default(); if root.as_mut().len() != value.as_slice().len() { // some child trie roots in the top trie are not encoded hashes. - return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); @@ -246,7 +237,7 @@ pub fn encode_compact( if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proofs are allowed to be missing (unused root can be included // due to trie structure modification). - continue; + continue } let trie = crate::TrieDB::::new(&partial_db, &child_root)?; diff --git a/substrate/primitives/trie/src/trie_stream.rs b/substrate/primitives/trie/src/trie_stream.rs index 3a65c5a9190b4646aee824ed6afcb656c31eef89..e0e26fea67c2ef90a900ecf14d9a3635384e12b9 100644 --- a/substrate/primitives/trie/src/trie_stream.rs +++ b/substrate/primitives/trie/src/trie_stream.rs @@ -17,13 +17,15 @@ //! `TrieStream` implementation for Substrate's trie format. -use hash_db::Hasher; -use trie_root; +use crate::{ + node_codec::Bitmap, + node_header::{size_and_prefix_iterator, NodeKind}, + trie_constants, +}; use codec::Encode; +use hash_db::Hasher; use sp_std::vec::Vec; -use crate::trie_constants; -use crate::node_header::{NodeKind, size_and_prefix_iterator}; -use crate::node_codec::Bitmap; +use trie_root; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; @@ -36,41 +38,42 @@ pub struct TrieStream { impl TrieStream { // useful for debugging but not used otherwise - pub fn as_raw(&self) -> &[u8] { &self.buffer } + pub fn as_raw(&self) -> &[u8] { + &self.buffer + } } fn branch_node_bit_mask(has_children: impl Iterator) -> (u8, u8) { let mut bitmap: u16 = 0; let mut cursor: u16 = 1; for v in has_children { - if v { bitmap |= cursor } + if v { + bitmap |= cursor + } cursor <<= 1; } - ((bitmap % 256 ) as u8, (bitmap / 256 ) as u8) + ((bitmap % 256) as u8, (bitmap / 256) as u8) } - /// Create a leaf/branch node, encoding a number of nibbles.
fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator + 'a { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len()); let iter_start = match kind { NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK), - NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK), - NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK), + NodeKind::BranchNoValue => + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK), + NodeKind::BranchWithValue => + size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) } - impl trie_root::TrieStream for TrieStream { - fn new() -> Self { - TrieStream { - buffer: Vec::new() - } + TrieStream { buffer: Vec::new() } } fn append_empty_data(&mut self) { @@ -95,7 +98,7 @@ impl trie_root::TrieStream for TrieStream { self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); } let bm = branch_node_bit_mask(has_children); - self.buffer.extend([bm.0,bm.1].iter()); + self.buffer.extend([bm.0, bm.1].iter()); } else { debug_assert!(false, "trie stream codec only for no extension trie"); self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); @@ -117,7 +120,9 @@ impl trie_root::TrieStream for TrieStream { } } - fn out(self) -> Vec { self.buffer } + fn out(self) -> Vec { + self.buffer + } } fn branch_node(has_value: bool, has_children: impl Iterator) -> [u8; 3] { @@ -126,15 +131,11 @@ fn branch_node(has_value: bool, has_children: impl Iterator) -> [u8 result } -fn branch_node_buffered(has_value: bool, has_children: I, output: &mut[u8]) - where - I: Iterator, +fn branch_node_buffered(has_value: bool, has_children: I, output: &mut [u8]) +where + I: Iterator, { - let first = if has_value { - BRANCH_NODE_WITH_VALUE - } else { - BRANCH_NODE_NO_VALUE - }; + let first = if has_value { BRANCH_NODE_WITH_VALUE } else { BRANCH_NODE_NO_VALUE }; output[0] = first; Bitmap::encode(has_children, &mut output[1..]); } diff --git a/substrate/primitives/utils/src/metrics.rs b/substrate/primitives/utils/src/metrics.rs index 45d68ae4e6f70933edbdac7f31f823ac06ebb2ed..45d8b3b7311d746fcbe523e4e471436b3ee03d39 100644 --- a/substrate/primitives/utils/src/metrics.rs +++ b/substrate/primitives/utils/src/metrics.rs @@ -19,22 +19,20 @@ use lazy_static::lazy_static; use prometheus::{ - Registry, Error as PrometheusError, - core::{ AtomicU64, GenericGauge, GenericCounter }, + core::{AtomicU64, GenericCounter, GenericGauge}, + Error as PrometheusError, Registry, }; #[cfg(feature = "metered")] use prometheus::{core::GenericCounterVec, Opts}; - lazy_static! { - pub static ref TOKIO_THREADS_TOTAL: GenericCounter = GenericCounter::new( - "tokio_threads_total", "Total number of threads created" - ).expect("Creating of statics doesn't fail. qed"); - - pub static ref TOKIO_THREADS_ALIVE: GenericGauge = GenericGauge::new( - "tokio_threads_alive", "Number of threads alive right now" - ).expect("Creating of statics doesn't fail. qed"); + pub static ref TOKIO_THREADS_TOTAL: GenericCounter = + GenericCounter::new("tokio_threads_total", "Total number of threads created") + .expect("Creating of statics doesn't fail. 
qed"); + pub static ref TOKIO_THREADS_ALIVE: GenericGauge = + GenericGauge::new("tokio_threads_alive", "Number of threads alive right now") + .expect("Creating of statics doesn't fail. qed"); } #[cfg(feature = "metered")] @@ -46,7 +44,6 @@ lazy_static! { } - /// Register the statics to report to registry pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> { registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; diff --git a/substrate/primitives/utils/src/mpsc.rs b/substrate/primitives/utils/src/mpsc.rs index b033a5527d84a0b699ac8db8beb3153df0116c91..72dcd94c39e049a01bda7b57b0a87412426ba618 100644 --- a/substrate/primitives/utils/src/mpsc.rs +++ b/substrate/primitives/utils/src/mpsc.rs @@ -25,22 +25,26 @@ mod inner { pub type TracingUnboundedReceiver = UnboundedReceiver; /// Alias `mpsc::unbounded` - pub fn tracing_unbounded(_key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + pub fn tracing_unbounded( + _key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { mpsc::unbounded() } } - #[cfg(feature = "metered")] mod inner { - //tracing implementation - use futures::channel::mpsc::{self, - UnboundedReceiver, UnboundedSender, - TryRecvError, TrySendError, SendError + // tracing implementation + use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; + use futures::{ + channel::mpsc::{ + self, SendError, TryRecvError, TrySendError, UnboundedReceiver, UnboundedSender, + }, + sink::Sink, + stream::{FusedStream, Stream}, + task::{Context, Poll}, }; - use futures::{sink::Sink, task::{Poll, Context}, stream::{Stream, FusedStream}}; use std::pin::Pin; - use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; /// Wrapper Type around `UnboundedSender` that increases the global /// measure when a message is added @@ -61,9 +65,11 @@ mod inner { /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via /// `UNBOUNDED_CHANNELS_COUNTER` - pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + pub fn tracing_unbounded( + key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key,r)) + (TracingUnboundedSender(key, s), TracingUnboundedReceiver(key, r)) } impl TracingUnboundedSender { @@ -94,7 +100,7 @@ mod inner { /// Proxy function to mpsc::UnboundedSender pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.1.unbounded_send(msg).map(|s|{ + self.1.unbounded_send(msg).map(|s| { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"send"]).inc(); s }) @@ -107,25 +113,25 @@ mod inner { } impl TracingUnboundedReceiver { - fn consume(&mut self) { // consume all items, make sure to reflect the updated count let mut count = 0; loop { if self.1.is_terminated() { - break; + break } match self.try_next() { Ok(Some(..)) => count += 1, - _ => break + _ => break, } } // and discount the messages if count > 0 { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"dropped"]).inc_by(count); + UNBOUNDED_CHANNELS_COUNTER + .with_label_values(&[self.0, &"dropped"]) + .inc_by(count); } - } /// Proxy function to mpsc::UnboundedReceiver @@ -158,21 +164,16 @@ mod inner { impl Stream for TracingUnboundedReceiver { type Item = T; - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let s = self.get_mut(); match Pin::new(&mut s.1).poll_next(cx) { Poll::Ready(msg) => { if 
msg.is_some() { UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[s.0, "received"]).inc(); - } + } Poll::Ready(msg) - } - Poll::Pending => { - Poll::Pending - } + }, + Poll::Pending => Poll::Pending, } } } @@ -186,24 +187,15 @@ mod inner { impl Sink for TracingUnboundedSender { type Error = SendError; - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { TracingUnboundedSender::poll_ready(&*self, cx) } - fn start_send( - mut self: Pin<&mut Self>, - msg: T, - ) -> Result<(), Self::Error> { + fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { TracingUnboundedSender::start_send(&mut *self, msg) } - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } @@ -219,33 +211,23 @@ mod inner { impl Sink for &TracingUnboundedSender { type Error = SendError; - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { TracingUnboundedSender::poll_ready(*self, cx) } fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - self.unbounded_send(msg) - .map_err(TrySendError::into_send_error) + self.unbounded_send(msg).map_err(TrySendError::into_send_error) } - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn poll_close( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { self.close_channel(); Poll::Ready(Ok(())) } } } -pub use inner::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +pub use inner::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/substrate/primitives/utils/src/status_sinks.rs b/substrate/primitives/utils/src/status_sinks.rs index dc8115670de1e2e0137361f839329acc8871ce51..0870ab119299677347464303c2a291f04bf9a58f 100644 --- a/substrate/primitives/utils/src/status_sinks.rs +++ b/substrate/primitives/utils/src/status_sinks.rs @@ -16,9 +16,13 @@ // limitations under the License. use crate::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use futures::{prelude::*, lock::Mutex}; +use futures::{lock::Mutex, prelude::*}; use futures_timer::Delay; -use std::{pin::Pin, task::{Poll, Context}, time::Duration}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. Every time the /// period elapses, we push an element on the sender. @@ -44,7 +48,7 @@ struct YieldAfter { sender: Option>, } -impl Default for StatusSinks { +impl Default for StatusSinks { fn default() -> Self { Self::new() } @@ -56,10 +60,7 @@ impl StatusSinks { let (entries_tx, entries_rx) = tracing_unbounded("status-sinks-entries"); StatusSinks { - inner: Mutex::new(Inner { - entries: stream::FuturesUnordered::new(), - entries_rx, - }), + inner: Mutex::new(Inner { entries: stream::FuturesUnordered::new(), entries_rx }), entries_tx, } } @@ -100,7 +101,7 @@ impl StatusSinks { } }; - futures::select!{ + futures::select! 
{ new_entry = inner.entries_rx.next() => { if let Some(new_entry) = new_entry { inner.entries.push(new_entry); @@ -149,7 +150,7 @@ impl<'a, T> Drop for ReadySinkEvent<'a, T> { fn drop(&mut self) { if let Some(sender) = self.sender.take() { if sender.is_closed() { - return; + return } let _ = self.sinks.entries_tx.unbounded_send(YieldAfter { @@ -170,18 +171,20 @@ impl futures::Future for YieldAfter { match Pin::new(&mut this.delay).poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(()) => { - let sender = this.sender.take() + let sender = this + .sender + .take() .expect("sender is always Some unless the future is finished; qed"); Poll::Ready((sender, this.interval)) - } + }, } } } #[cfg(test)] mod tests { - use crate::mpsc::tracing_unbounded; use super::StatusSinks; + use crate::mpsc::tracing_unbounded; use futures::prelude::*; use std::time::Duration; @@ -208,7 +211,7 @@ mod tests { Box::pin(async { let items: Vec = rx.take(3).collect().await; assert_eq!(items, [6, 7, 8]); - }) + }), )); } } diff --git a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs index 22803f07d811dd253cdf54f3d6d819681079b12a..cdf244f72ce858af10f6845c51fdf4bb8342878f 100644 --- a/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/substrate/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -16,14 +16,14 @@ // limitations under the License. use codec::Encode; +use proc_macro2::{Span, TokenStream}; +use quote::quote; use syn::{ - Expr, ExprLit, FieldValue, ItemConst, Lit, - parse::{Result, Error}, + parse::{Error, Result}, parse_macro_input, spanned::Spanned as _, + Expr, ExprLit, FieldValue, ItemConst, Lit, }; -use quote::quote; -use proc_macro2::{TokenStream, Span}; /// This macro accepts a `const` item that has a struct initializer expression of `RuntimeVersion`-like type. 
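For context on what `ParseRuntimeVersion` below does: it walks the fields of a struct initializer expression with `syn` and pulls literals out of named fields. A reduced sketch of that approach, assuming `syn = { version = "1", features = ["full"] }` as a dependency (function and field names here are illustrative):

```rust
use syn::{Expr, ExprLit, Lit};

// Extract the `spec_version` numeric literal from a struct initializer
// expression, in the style of the proc macro's field parsing.
fn spec_version(expr: &Expr) -> Result<u32, syn::Error> {
    let init = match expr {
        Expr::Struct(e) => e,
        _ =>
            return Err(syn::Error::new_spanned(expr, "expected a struct initializer expression")),
    };
    for field in &init.fields {
        if let syn::Member::Named(ident) = &field.member {
            if ident == "spec_version" {
                if let Expr::Lit(ExprLit { lit: Lit::Int(lit), .. }) = &field.expr {
                    return lit.base10_parse::<u32>()
                }
            }
        }
    }
    Err(syn::Error::new_spanned(expr, "required field 'spec_version' is missing"))
}

fn main() {
    let expr: Expr = syn::parse_str("RuntimeVersion { spec_version: 265 }").unwrap();
    assert_eq!(spec_version(&expr).unwrap(), 265);
}
```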
/// The macro will pass through this declaration and append an item declaration that will @@ -78,12 +78,8 @@ impl ParseRuntimeVersion { fn parse_expr(init_expr: &Expr) -> Result { let init_expr = match init_expr { Expr::Struct(ref e) => e, - _ => { - return Err(Error::new( - init_expr.span(), - "expected a struct initializer expression", - )); - } + _ => + return Err(Error::new(init_expr.span(), "expected a struct initializer expression")), }; let mut parsed = ParseRuntimeVersion::default(); @@ -96,12 +92,8 @@ impl ParseRuntimeVersion { fn parse_field_value(&mut self, field_value: &FieldValue) -> Result<()> { let field_name = match field_value.member { syn::Member::Named(ref ident) => ident, - syn::Member::Unnamed(_) => { - return Err(Error::new( - field_value.span(), - "only named members must be used", - )); - } + syn::Member::Unnamed(_) => + return Err(Error::new(field_value.span(), "only named members must be used")), }; fn parse_once( @@ -110,10 +102,7 @@ impl ParseRuntimeVersion { parser: impl FnOnce(&Expr) -> Result, ) -> Result<()> { if value.is_some() { - return Err(Error::new( - field.span(), - "field is already initialized before", - )); + return Err(Error::new(field.span(), "field is already initialized before")) } else { *value = Some(parser(&field.expr)?); Ok(()) @@ -125,21 +114,13 @@ impl ParseRuntimeVersion { } else if field_name == "impl_name" { parse_once(&mut self.impl_name, field_value, Self::parse_str_literal)?; } else if field_name == "authoring_version" { - parse_once( - &mut self.authoring_version, - field_value, - Self::parse_num_literal, - )?; + parse_once(&mut self.authoring_version, field_value, Self::parse_num_literal)?; } else if field_name == "spec_version" { parse_once(&mut self.spec_version, field_value, Self::parse_num_literal)?; } else if field_name == "impl_version" { parse_once(&mut self.impl_version, field_value, Self::parse_num_literal)?; } else if field_name == "transaction_version" { - parse_once( - &mut self.transaction_version, - field_value, - Self::parse_num_literal, - )?; + parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?; } else if field_name == "apis" { // Intentionally ignored // @@ -147,7 +128,7 @@ impl ParseRuntimeVersion { // the "runtime_version" custom section. `impl_runtime_apis` is responsible for generating // a custom section with the supported runtime apis descriptor. } else { - return Err(Error::new(field_name.span(), "unknown field")); + return Err(Error::new(field_name.span(), "unknown field")) } Ok(()) @@ -155,16 +136,12 @@ impl ParseRuntimeVersion { fn parse_num_literal(expr: &Expr) -> Result { let lit = match *expr { - Expr::Lit(ExprLit { - lit: Lit::Int(ref lit), - .. - }) => lit, - _ => { + Expr::Lit(ExprLit { lit: Lit::Int(ref lit), .. }) => lit, + _ => return Err(Error::new( expr.span(), "only numeric literals (e.g. `10`) are supported here", - )); - } + )), }; lit.base10_parse::() } @@ -172,44 +149,28 @@ impl ParseRuntimeVersion { fn parse_str_literal(expr: &Expr) -> Result { let mac = match *expr { Expr::Macro(syn::ExprMacro { ref mac, .. 
}) => mac, - _ => { - return Err(Error::new( - expr.span(), - "a macro expression is expected here", - )); - } + _ => return Err(Error::new(expr.span(), "a macro expression is expected here")), }; let lit: ExprLit = mac.parse_body().map_err(|e| { Error::new( e.span(), - format!( - "a single literal argument is expected, but parsing is failed: {}", - e - ), + format!("a single literal argument is expected, but parsing is failed: {}", e), ) })?; match lit.lit { Lit::Str(ref lit) => Ok(lit.value()), - _ => Err(Error::new( - lit.span(), - "only string literals are supported here", - )), + _ => Err(Error::new(lit.span(), "only string literals are supported here")), } } fn build(self, span: Span) -> Result { macro_rules! required { ($e:expr) => { - $e.ok_or_else(|| - { - Error::new( - span, - format!("required field '{}' is missing", stringify!($e)), - ) - } - )? + $e.ok_or_else(|| { + Error::new(span, format!("required field '{}' is missing", stringify!($e))) + })? }; } diff --git a/substrate/primitives/version/src/embed.rs b/substrate/primitives/version/src/embed.rs index f32bc73d883aa8e030a613608efa3374414a2b14..452762dcf687a3c546421c0496c2a21b191e7af8 100644 --- a/substrate/primitives/version/src/embed.rs +++ b/substrate/primitives/version/src/embed.rs @@ -19,7 +19,7 @@ //! into a WASM file. use codec::Encode; -use parity_wasm::elements::{Module, deserialize_buffer, serialize}; +use parity_wasm::elements::{deserialize_buffer, serialize, Module}; #[derive(Clone, Copy, Eq, PartialEq, Debug, thiserror::Error)] pub enum Error { @@ -40,7 +40,8 @@ pub fn embed_runtime_version( ) -> Result, Error> { let mut module: Module = deserialize_buffer(wasm).map_err(|_| Error::Deserialize)?; - let apis = version.apis + let apis = version + .apis .iter() .map(Encode::encode) .map(|v| v.into_iter()) diff --git a/substrate/primitives/version/src/lib.rs b/substrate/primitives/version/src/lib.rs index aa7ae3da89d58d5fb858791de2a3468b00bd95b7..b3ddb7d7fecc282498a51c8f87438bf7ca831374 100644 --- a/substrate/primitives/version/src/lib.rs +++ b/substrate/primitives/version/src/lib.rs @@ -20,20 +20,20 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -#[cfg(feature = "std")] -use std::fmt; +use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use std::collections::HashSet; +#[cfg(feature = "std")] +use std::fmt; -use codec::{Encode, Decode}; -use sp_runtime::RuntimeString; +use codec::{Decode, Encode}; pub use sp_runtime::create_runtime_str; +use sp_runtime::RuntimeString; #[doc(hidden)] pub use sp_std; #[cfg(feature = "std")] -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; #[cfg(feature = "std")] pub mod embed; @@ -106,7 +106,9 @@ pub type ApisVec = sp_std::borrow::Cow<'static, [(ApiId, u32)]>; /// Create a vector of Api declarations. #[macro_export] macro_rules! create_apis_vec { - ( $y:expr ) => { $crate::sp_std::borrow::Cow::Borrowed(& $y) } + ( $y:expr ) => { + $crate::sp_std::borrow::Cow::Borrowed(&$y) + }; } /// Runtime version. @@ -172,7 +174,9 @@ pub struct RuntimeVersion { #[cfg(feature = "std")] impl fmt::Display for RuntimeVersion { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}-{} ({}-{}.tx{}.au{})", + write!( + f, + "{}-{} ({}-{}.tx{}.au{})", self.spec_name, self.spec_version, self.impl_name, @@ -188,17 +192,13 @@ impl RuntimeVersion { /// Check if this version matches other version for calling into runtime. 
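The compatibility rule in `can_call_with` below requires spec name, spec version, and authoring version to all agree. A trimmed-down sketch of the same check on a simplified version struct (not the real `RuntimeVersion`):

```rust
// Simplified stand-in for RuntimeVersion, keeping only the fields that
// `can_call_with` compares.
#[derive(Clone)]
struct Version {
    spec_name: &'static str,
    spec_version: u32,
    authoring_version: u32,
}

impl Version {
    fn can_call_with(&self, other: &Version) -> bool {
        self.spec_version == other.spec_version &&
            self.spec_name == other.spec_name &&
            self.authoring_version == other.authoring_version
    }
}

fn main() {
    let native = Version { spec_name: "node", spec_version: 265, authoring_version: 2 };
    let mut on_chain = native.clone();
    assert!(native.can_call_with(&on_chain));
    on_chain.spec_version = 266; // runtime upgraded on chain
    assert!(!native.can_call_with(&on_chain));
}
```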
pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { self.spec_version == other.spec_version && - self.spec_name == other.spec_name && - self.authoring_version == other.authoring_version + self.spec_name == other.spec_name && + self.authoring_version == other.authoring_version } /// Check if the given api with `api_id` is implemented and the version passes the given /// `predicate`. - pub fn has_api_with bool>( - &self, - id: &ApiId, - predicate: P, - ) -> bool { + pub fn has_api_with bool>(&self, id: &ApiId, predicate: P) -> bool { self.apis.iter().any(|(s, v)| s == id && predicate(*v)) } @@ -229,11 +229,10 @@ impl NativeVersion { if self.runtime_version.spec_name != other.spec_name { Err(format!( "`spec_name` does not match `{}` vs `{}`", - self.runtime_version.spec_name, - other.spec_name, + self.runtime_version.spec_name, other.spec_name, )) - } else if self.runtime_version.authoring_version != other.authoring_version - && !self.can_author_with.contains(&other.authoring_version) + } else if self.runtime_version.authoring_version != other.authoring_version && + !self.can_author_with.contains(&other.authoring_version) { Err(format!( "`authoring_version` does not match `{version}` vs `{other_version}` and \ @@ -272,15 +271,13 @@ impl, Block: BlockT> GetRuntimeVersion for st mod apis_serialize { use super::*; use impl_serde::serialize as bytes; - use serde::{Serializer, de, ser::SerializeTuple}; + use serde::{de, ser::SerializeTuple, Serializer}; #[derive(Serialize)] - struct ApiId<'a>( - #[serde(serialize_with="serialize_bytesref")] &'a super::ApiId, - &'a u32, - ); + struct ApiId<'a>(#[serde(serialize_with = "serialize_bytesref")] &'a super::ApiId, &'a u32); - pub fn serialize(apis: &ApisVec, ser: S) -> Result where + pub fn serialize(apis: &ApisVec, ser: S) -> Result + where S: Serializer, { let len = apis.len(); @@ -291,20 +288,18 @@ mod apis_serialize { seq.end() } - pub fn serialize_bytesref(&apis: &&super::ApiId, ser: S) -> Result where + pub fn serialize_bytesref(&apis: &&super::ApiId, ser: S) -> Result + where S: Serializer, { bytes::serialize(apis, ser) } #[derive(Deserialize)] - struct ApiIdOwned( - #[serde(deserialize_with="deserialize_bytes")] - super::ApiId, - u32, - ); + struct ApiIdOwned(#[serde(deserialize_with = "deserialize_bytes")] super::ApiId, u32); - pub fn deserialize<'de, D>(deserializer: D) -> Result where + pub fn deserialize<'de, D>(deserializer: D) -> Result + where D: de::Deserializer<'de>, { struct Visitor; @@ -315,7 +310,8 @@ mod apis_serialize { formatter.write_str("a sequence of api id and version tuples") } - fn visit_seq(self, mut visitor: V) -> Result where + fn visit_seq(self, mut visitor: V) -> Result + where V: de::SeqAccess<'de>, { let mut apis = Vec::new(); @@ -328,8 +324,9 @@ mod apis_serialize { deserializer.deserialize_seq(Visitor) } - pub fn deserialize_bytes<'de, D>(d: D) -> Result where - D: de::Deserializer<'de> + pub fn deserialize_bytes<'de, D>(d: D) -> Result + where + D: de::Deserializer<'de>, { let mut arr = [0; 8]; bytes::deserialize_check_len(d, bytes::ExpectedLen::Exact(&mut arr[..]))?; diff --git a/substrate/primitives/wasm-interface/src/lib.rs b/substrate/primitives/wasm-interface/src/lib.rs index fd200268473b083c8e922cce5a55914970834906..3f1f1c17140369cda543e1f7a94a36a5a43717e4 100644 --- a/substrate/primitives/wasm-interface/src/lib.rs +++ b/substrate/primitives/wasm-interface/src/lib.rs @@ -19,10 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{ - vec, - borrow::Cow, marker::PhantomData, mem, 
iter::Iterator, result, vec::Vec, -}; +use sp_std::{borrow::Cow, iter::Iterator, marker::PhantomData, mem, result, vec, vec::Vec}; #[cfg(feature = "std")] mod wasmi_impl; @@ -141,10 +138,7 @@ pub struct Pointer { impl Pointer { /// Create a new instance of `Self`. pub fn new(ptr: u32) -> Self { - Self { - ptr, - _marker: Default::default(), - } + Self { ptr, _marker: Default::default() } } /// Calculate the offset from this pointer. @@ -153,12 +147,10 @@ impl Pointer { /// /// Returns an `Option` to respect that the pointer could probably overflow. pub fn offset(self, offset: u32) -> Option { - offset.checked_mul(T::SIZE).and_then(|o| self.ptr.checked_add(o)).map(|ptr| { - Self { - ptr, - _marker: Default::default(), - } - }) + offset + .checked_mul(T::SIZE) + .and_then(|o| self.ptr.checked_add(o)) + .map(|ptr| Self { ptr, _marker: Default::default() }) } /// Create a null pointer. @@ -198,7 +190,9 @@ impl From> for usize { impl IntoValue for Pointer { const VALUE_TYPE: ValueType = ValueType::I32; - fn into_value(self) -> Value { Value::I32(self.ptr as _) } + fn into_value(self) -> Value { + Value::I32(self.ptr as _) + } } impl TryFromValue for Pointer { @@ -224,19 +218,16 @@ pub struct Signature { impl Signature { /// Create a new instance of `Signature`. - pub fn new>>(args: T, return_value: Option) -> Self { - Self { - args: args.into(), - return_value, - } + pub fn new>>( + args: T, + return_value: Option, + ) -> Self { + Self { args: args.into(), return_value } } /// Create a new instance of `Signature` with the given `args` and without any return value. pub fn new_with_args>>(args: T) -> Self { - Self { - args: args.into(), - return_value: None, - } + Self { args: args.into(), return_value: None } } } @@ -500,7 +491,6 @@ mod tests { assert_eq!(ptr.offset(32).unwrap(), Pointer::new(256)); } - #[test] fn return_value_encoded_max_size() { let encoded = ReturnValue::Value(Value::I64(-1)).encode(); diff --git a/substrate/primitives/wasm-interface/src/wasmi_impl.rs b/substrate/primitives/wasm-interface/src/wasmi_impl.rs index 79110487ffca5541c345f12e105c74a469659493..f7e0ec6f16d4a8095524cbc040f63ebd4a5be599 100644 --- a/substrate/primitives/wasm-interface/src/wasmi_impl.rs +++ b/substrate/primitives/wasm-interface/src/wasmi_impl.rs @@ -17,7 +17,7 @@ //! Implementation of conversions between Substrate and wasmi types. -use crate::{Value, ValueType, Signature}; +use crate::{Signature, Value, ValueType}; impl From for wasmi::RuntimeValue { fn from(value: Value) -> Self { diff --git a/substrate/rustfmt.toml b/substrate/rustfmt.toml index 1c9ebe03c02e371e972274dbbf8d80fb471af85d..15e9bdcdf10f15e7e6c02c26d68c268000b301eb 100644 --- a/substrate/rustfmt.toml +++ b/substrate/rustfmt.toml @@ -7,8 +7,6 @@ imports_granularity = "Crate" reorder_imports = true # Consistency newline_style = "Unix" -normalize_comments = true -normalize_doc_attributes = true # Misc chain_width = 80 spaces_around_ranges = false diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index edba96d760fc2e3787d7f180a2eccda81a15869a..ef778ca96805727e18280d440f3bd385ae420020 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -17,17 +17,13 @@ //! Client extension for tests. 
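Among the hunks above, `Pointer::offset` keeps its overflow-checked arithmetic while being reflowed into a method chain. A standalone sketch of that pattern, with the element size hard-coded for illustration (the real type derives `SIZE` from the pointee):

```rust
// Overflow-safe pointer arithmetic: multiply the offset by the element size
// and add it to the base, returning None instead of wrapping.
#[derive(Debug, PartialEq)]
struct Ptr {
    ptr: u32,
}

const SIZE: u32 = 8; // e.g. a u64 pointee

impl Ptr {
    fn offset(&self, offset: u32) -> Option<Ptr> {
        offset
            .checked_mul(SIZE)
            .and_then(|o| self.ptr.checked_add(o))
            .map(|ptr| Ptr { ptr })
    }
}

fn main() {
    assert_eq!(Ptr { ptr: 64 }.offset(32), Some(Ptr { ptr: 320 }));
    assert_eq!(Ptr { ptr: 64 }.offset(u32::MAX), None); // would overflow
}
```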
+use codec::alloc::collections::hash_map::HashMap; +use sc_client_api::{backend::Finalizer, client::BlockBackend}; use sc_service::client::Client; -use sc_client_api::backend::Finalizer; -use sc_client_api::client::BlockBackend; use sp_consensus::{ - BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, - ForkChoiceStrategy, + BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, }; -use sp_runtime::{Justification, Justifications}; -use sp_runtime::traits::{Block as BlockT}; -use sp_runtime::generic::BlockId; -use codec::alloc::collections::hash_map::HashMap; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification, Justifications}; /// Extension trait for a test client. pub trait ClientExt: Sized { @@ -49,11 +45,18 @@ pub trait ClientBlockImportExt: Sized { async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and make it our best block if possible. - async fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError>; /// Import a block and finalize it. - async fn import_as_final(&mut self, origin: BlockOrigin, block: Block) - -> Result<(), ConsensusError>; + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError>; /// Import block with justification(s), finalizes block. async fn import_justified( @@ -65,11 +68,11 @@ pub trait ClientBlockImportExt: Sized { } impl ClientExt for Client - where - B: sc_client_api::backend::Backend, - E: sc_client_api::CallExecutor + 'static, - Self: BlockImport, - Block: BlockT, +where + B: sc_client_api::backend::Backend, + E: sc_client_api::CallExecutor + 'static, + Self: BlockImport, + Block: BlockT, { fn finalize_block( &self, @@ -87,16 +90,12 @@ impl ClientExt for Client /// This implementation is required, because of the weird api requirements around `BlockImport`. 
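The comment below refers to the blanket `impl ... for std::sync::Arc<T>` that forwards to `&T` via a higher-ranked bound. A minimal sketch of that delegation pattern with a stand-in trait (not the real `BlockImport`):

```rust
use std::sync::Arc;

trait Import {
    fn import(&mut self, block: u32) -> Result<(), String>;
}

struct Client;

// The trait is implemented for a *reference* to the client...
impl Import for &Client {
    fn import(&mut self, _block: u32) -> Result<(), String> {
        Ok(())
    }
}

// ...so an Arc can forward to it by borrowing the inner value.
impl<T> Import for Arc<T>
where
    for<'r> &'r T: Import,
{
    fn import(&mut self, block: u32) -> Result<(), String> {
        let mut inner: &T = &**self;
        inner.import(block)
    }
}

fn main() {
    let mut client = Arc::new(Client);
    client.import(7).unwrap();
}
```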
#[async_trait::async_trait] impl ClientBlockImportExt for std::sync::Arc - where - for<'r> &'r T: BlockImport, - Transaction: Send + 'static, - T: Send + Sync, +where + for<'r> &'r T: BlockImport, + Transaction: Send + 'static, + T: Send + Sync, { - async fn import( - &mut self, - origin: BlockOrigin, - block: Block, - ) -> Result<(), ConsensusError> { + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); @@ -151,18 +150,14 @@ impl ClientBlockImportExt for std::sync::A #[async_trait::async_trait] impl ClientBlockImportExt for Client - where - Self: BlockImport, - RA: Send, - B: Send + Sync, - E: Send, - >::Transaction: Send, +where + Self: BlockImport, + RA: Send, + B: Send + Sync, + E: Send, + >::Transaction: Send, { - async fn import( - &mut self, - origin: BlockOrigin, - block: Block, - ) -> Result<(), ConsensusError> { + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); diff --git a/substrate/test-utils/client/src/lib.rs b/substrate/test-utils/client/src/lib.rs index 0971c00d784284e5bd39fe944f18dd37ec669f72..d08a01a4decbe1631feb0801f310f2d03873ab20 100644 --- a/substrate/test-utils/client/src/lib.rs +++ b/substrate/test-utils/client/src/lib.rs @@ -21,39 +21,44 @@ pub mod client_ext; +pub use self::client_ext::{ClientBlockImportExt, ClientExt}; pub use sc_client_api::{ - execution_extensions::{ExecutionStrategies, ExecutionExtensions}, - ForkBlocks, BadBlocks, + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + BadBlocks, ForkBlocks, }; -pub use sc_client_db::{Backend, self}; +pub use sc_client_db::{self, Backend}; +pub use sc_executor::{self, NativeExecutor, WasmExecutionMethod}; +pub use sc_service::{client, RpcHandlers, RpcSession}; pub use sp_consensus; -pub use sc_executor::{NativeExecutor, WasmExecutionMethod, self}; pub use sp_keyring::{ - AccountKeyring, - ed25519::Keyring as Ed25519Keyring, - sr25519::Keyring as Sr25519Keyring, + ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, }; -pub use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; +pub use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; -pub use sc_service::{RpcHandlers, RpcSession, client}; -pub use self::client_ext::{ClientExt, ClientBlockImportExt}; -use std::pin::Pin; -use std::sync::Arc; -use std::collections::{HashSet, HashMap}; -use futures::{future::{Future, FutureExt}, stream::StreamExt}; +use futures::{ + future::{Future, FutureExt}, + stream::StreamExt, +}; +use sc_client_api::BlockchainEvents; +use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::storage::ChildInfo; -use sp_runtime::{OpaqueExtrinsic, codec::Encode, traits::{Block as BlockT, BlakeTwo256}}; -use sc_service::client::{LocalCallExecutor, ClientConfig}; -use sc_client_api::BlockchainEvents; +use sp_runtime::{ + codec::Encode, + traits::{BlakeTwo256, Block as BlockT}, + OpaqueExtrinsic, +}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, + sync::Arc, +}; /// Test client light database backend. 
-pub type LightBackend = sc_light::Backend< - sc_client_db::light::LightStorage, - BlakeTwo256, ->; +pub type LightBackend = + sc_light::Backend, BlakeTwo256>; /// A genesis storage initialization trait. pub trait GenesisInit: Default { @@ -84,13 +89,16 @@ pub struct TestClientBuilder { } impl Default - for TestClientBuilder, G> { + for TestClientBuilder, G> +{ fn default() -> Self { Self::with_default_backend() } } -impl TestClientBuilder, G> { +impl + TestClientBuilder, G> +{ /// Create new `TestClientBuilder` with default backend. pub fn with_default_backend() -> Self { let backend = Arc::new(Backend::new_test(std::u32::MAX, std::u64::MAX)); @@ -114,7 +122,9 @@ impl TestClientBuilder TestClientBuilder { +impl + TestClientBuilder +{ /// Create a new instance of the test client builder. pub fn with_backend(backend: Arc) -> Self { TestClientBuilder { @@ -155,20 +165,15 @@ impl TestClientBuilder, ) -> Self { let storage_key = child_info.storage_key(); - let entry = self.child_storage_extension.entry(storage_key.to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.clone(), - }); + let entry = self.child_storage_extension.entry(storage_key.to_vec()).or_insert_with(|| { + StorageChild { data: Default::default(), child_info: child_info.clone() } + }); entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self } /// Set the execution strategy that should be used by all contexts. - pub fn set_execution_strategy( - mut self, - execution_strategy: ExecutionStrategy - ) -> Self { + pub fn set_execution_strategy(mut self, execution_strategy: ExecutionStrategy) -> Self { self.execution_strategies = ExecutionStrategies { syncing: execution_strategy, importing: execution_strategy, @@ -180,7 +185,8 @@ impl TestClientBuilder, bad_blocks: BadBlocks, ) -> Self { @@ -206,14 +212,10 @@ impl TestClientBuilder ( - client::Client< - Backend, - Executor, - Block, - RuntimeApi, - >, + client::Client, sc_consensus::LongestChain, - ) where + ) + where Executor: sc_client_api::CallExecutor + 'static, Backend: sc_client_api::backend::Backend, >::OffchainStorage: 'static, @@ -253,7 +255,8 @@ impl TestClientBuilder TestClientBuilder TestClientBuilder< - Block, - client::LocalCallExecutor>, - Backend, - G, -> { +impl + TestClientBuilder>, Backend, G> +{ /// Build the test client with the given native executor. 
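The child-storage helpers above rely on `HashMap::entry(..).or_insert_with(..)` to create the child bucket on first use. A reduced sketch of the same pattern with simplified types (no `ChildInfo`):

```rust
use std::collections::HashMap;

// Stand-in for a child trie's key/value data.
type StorageChild = HashMap<Vec<u8>, Vec<u8>>;

fn add_child_storage(
    children: &mut HashMap<Vec<u8>, StorageChild>,
    storage_key: &[u8],
    key: &[u8],
    value: &[u8],
) {
    // Create the child bucket on first use, then insert into it.
    let entry = children.entry(storage_key.to_vec()).or_insert_with(StorageChild::new);
    entry.insert(key.to_vec(), value.to_vec());
}

fn main() {
    let mut children = HashMap::new();
    add_child_storage(&mut children, b"child1", b"k1", b"v1");
    add_child_storage(&mut children, b"child1", b"k2", b"v2");
    assert_eq!(children[&b"child1".to_vec()].len(), 2);
}
```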
pub fn build_with_native_executor( self, @@ -276,23 +276,25 @@ impl TestClientBuilder< Backend, client::LocalCallExecutor>, Block, - RuntimeApi + RuntimeApi, >, sc_consensus::LongestChain, - ) where + ) + where I: Into>>, E: sc_executor::NativeExecutionDispatch + 'static, Backend: sc_client_api::backend::Backend + 'static, { - let executor = executor.into().unwrap_or_else(|| - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - ); + let executor = executor + .into() + .unwrap_or_else(|| NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8)); let executor = LocalCallExecutor::new( self.backend.clone(), executor, Box::new(sp_core::testing::TaskExecutor::new()), Default::default(), - ).expect("Creates LocalCallExecutor"); + ) + .expect("Creates LocalCallExecutor"); self.build_with_executor(executor) } @@ -347,8 +349,8 @@ impl RpcHandlersExt for RpcHandlers { ) -> Pin> + Send>> { let (tx, rx) = futures01::sync::mpsc::channel(0); let mem = RpcSession::new(tx.into()); - Box::pin(self - .rpc_query( + Box::pin( + self.rpc_query( &mem, &format!( r#"{{ @@ -360,7 +362,7 @@ impl RpcHandlersExt for RpcHandlers { hex::encode(extrinsic.encode()) ), ) - .map(move |result| parse_rpc_result(result, mem, rx)) + .map(move |result| parse_rpc_result(result, mem, rx)), ) } } @@ -371,26 +373,17 @@ pub(crate) fn parse_rpc_result( receiver: futures01::sync::mpsc::Receiver, ) -> Result { if let Some(ref result) = result { - let json: serde_json::Value = serde_json::from_str(result) - .expect("the result can only be a JSONRPC string; qed"); - let error = json - .as_object() - .expect("JSON result is always an object; qed") - .get("error"); + let json: serde_json::Value = + serde_json::from_str(result).expect("the result can only be a JSONRPC string; qed"); + let error = json.as_object().expect("JSON result is always an object; qed").get("error"); if let Some(error) = error { - return Err( - serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed") - ) + return Err(serde_json::from_value(error.clone()) + .expect("the JSONRPC result's error is always valid; qed")) } } - Ok(RpcTransactionOutput { - result, - session, - receiver, - }) + Ok(RpcTransactionOutput { result, session, receiver }) } /// An extension trait for `BlockchainEvents`. 
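`parse_rpc_result` above treats the response as a JSON object and returns early if it carries an `error` member. A self-contained sketch of that extraction using only `serde_json` (assumed dep: `serde_json = "1"`):

```rust
use serde_json::Value;

// Return the JSON-RPC error code if the response carries an `error` member,
// None for a successful response.
fn rpc_error_code(response: &str) -> Option<i64> {
    let json: Value = serde_json::from_str(response).ok()?;
    json.as_object()?.get("error")?.get("code")?.as_i64()
}

fn main() {
    let ok = r#"{ "jsonrpc": "2.0", "result": 19, "id": 1 }"#;
    let err = r#"{ "jsonrpc": "2.0",
                   "error": { "code": -32601, "message": "Method not found" },
                   "id": 1 }"#;
    assert_eq!(rpc_error_code(ok), None);
    assert_eq!(rpc_error_code(err), Some(-32601));
}
```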
@@ -420,7 +413,7 @@ where if notification.is_new_best { blocks.insert(notification.hash); if blocks.len() == count { - break; + break } } } @@ -445,31 +438,45 @@ mod tests { assert!(super::parse_rpc_result(None, mem, rx).is_ok()); let (mem, rx) = create_session_and_receiver(); - assert!( - super::parse_rpc_result(Some(r#"{ + assert!(super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "result": 19, "id": 1 - }"#.to_string()), mem, rx) - .is_ok(), - ); + }"# + .to_string() + ), + mem, + rx + ) + .is_ok(),); let (mem, rx) = create_session_and_receiver(); - let error = super::parse_rpc_result(Some(r#"{ + let error = super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, "message": "Method not found" }, "id": 1 - }"#.to_string()), mem, rx) - .unwrap_err(); + }"# + .to_string(), + ), + mem, + rx, + ) + .unwrap_err(); assert_eq!(error.code, -32601); assert_eq!(error.message, "Method not found"); assert!(error.data.is_none()); let (mem, rx) = create_session_and_receiver(); - let error = super::parse_rpc_result(Some(r#"{ + let error = super::parse_rpc_result( + Some( + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, @@ -477,8 +484,13 @@ mod tests { "data": 42 }, "id": 1 - }"#.to_string()), mem, rx) - .unwrap_err(); + }"# + .to_string(), + ), + mem, + rx, + ) + .unwrap_err(); assert_eq!(error.code, -32601); assert_eq!(error.message, "Method not found"); assert!(error.data.is_some()); diff --git a/substrate/test-utils/derive/src/lib.rs b/substrate/test-utils/derive/src/lib.rs index fb1cb24cae40de6a59ca874b25336a937173bfdb..877792f82de6a043df168575145c8d5dc314bffa 100644 --- a/substrate/test-utils/derive/src/lib.rs +++ b/substrate/test-utils/derive/src/lib.rs @@ -43,15 +43,15 @@ fn parse_knobs( if sig.inputs.len() != 1 { let msg = "the test function accepts only one argument of type sc_service::TaskExecutor"; - return Err(syn::Error::new_spanned(&sig, msg)); + return Err(syn::Error::new_spanned(&sig, msg)) } let (task_executor_name, task_executor_type) = match sig.inputs.pop().map(|x| x.into_value()) { Some(syn::FnArg::Typed(x)) => (x.pat, x.ty), _ => { let msg = "the test function accepts only one argument of type sc_service::TaskExecutor"; - return Err(syn::Error::new_spanned(&sig, msg)); - } + return Err(syn::Error::new_spanned(&sig, msg)) + }, }; let crate_name = match crate_name("substrate-test-utils") { diff --git a/substrate/test-utils/runtime/client/src/block_builder_ext.rs b/substrate/test-utils/runtime/client/src/block_builder_ext.rs index 0d3211fa05a9da0471de679f5fff04b3bf5ebe72..e8c1d2ac5cd4884a318742d13f55e06ce9503d89 100644 --- a/substrate/test-utils/runtime/client/src/block_builder_ext.rs +++ b/substrate/test-utils/runtime/client/src/block_builder_ext.rs @@ -17,16 +17,19 @@ //! Block Builder extensions for tests. +use sc_client_api::backend; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::ChangesTrieConfiguration; -use sc_client_api::backend; use sc_block_builder::BlockBuilderApi; /// Extension trait for test block builder. pub trait BlockBuilderExt { /// Add transfer extrinsic to the block. - fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error>; + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error>; /// Add storage change extrinsic to the block. 
fn push_storage_change( &mut self, @@ -40,16 +43,21 @@ pub trait BlockBuilderExt { ) -> Result<(), sp_blockchain::Error>; } -impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where +impl<'a, A, B> BlockBuilderExt + for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> +where A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt< + A::Api: BlockBuilderApi + + ApiExt< substrate_test_runtime::Block, - StateBackend = backend::StateBackendFor + StateBackend = backend::StateBackendFor, >, B: backend::Backend, { - fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error> { + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error> { self.push(transfer.into_signed_tx()) } diff --git a/substrate/test-utils/runtime/client/src/lib.rs b/substrate/test-utils/runtime/client/src/lib.rs index a9ff26a5adf8d8739a6038e94e66f5c8a6048243..3db433968c9f8515509c133a74de46afa5f510bc 100644 --- a/substrate/test-utils/runtime/client/src/lib.rs +++ b/substrate/test-utils/runtime/client/src/lib.rs @@ -23,34 +23,36 @@ pub mod trait_tests; mod block_builder_ext; -use std::sync::Arc; -use std::collections::HashMap; +pub use sc_consensus::LongestChain; +use std::{collections::HashMap, sync::Arc}; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; -pub use sc_consensus::LongestChain; pub use self::block_builder_ext::BlockBuilderExt; -use sp_core::{sr25519, ChangesTrieConfiguration}; -use sp_core::storage::{ChildInfo, Storage, StorageChild}; -use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HashFor}; use sc_client_api::light::{ - RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, - Fetcher, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + RemoteReadChildRequest, RemoteReadRequest, }; +use sp_core::{ + sr25519, + storage::{ChildInfo, Storage, StorageChild}, + ChangesTrieConfiguration, +}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. pub mod prelude { // Trait extensions pub use super::{ - BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, - ClientBlockImportExt, + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClientBuilderExt, }; // Client structs pub use super::{ - TestClient, TestClientBuilder, Backend, LightBackend, - Executor, LightExecutor, LocalExecutor, NativeExecutor, WasmExecutionMethod, + Backend, Executor, LightBackend, LightExecutor, LocalExecutor, NativeExecutor, TestClient, + TestClientBuilder, WasmExecutionMethod, }; // Keyring pub use super::{AccountKeyring, Sr25519Keyring}; @@ -82,10 +84,10 @@ pub type LightExecutor = sc_light::GenesisCallExecutor< substrate_test_runtime::Block, sc_light::Backend< sc_client_db::light::LightStorage, - HashFor + HashFor, >, - NativeExecutor - > + NativeExecutor, + >, >; /// Parameters of test-client builder with test-runtime. 
@@ -130,19 +132,23 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let mut storage = self.genesis_config().genesis_map(); if let Some(ref code) = self.wasm_code { - storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); + storage + .top + .insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); } let child_roots = storage.children_default.iter().map(|(_sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect() - ); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); (prefixed_storage_key.into_inner(), state_root.encode()) }); - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().chain(child_roots).collect() - ); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + storage.top.clone().into_iter().chain(child_roots).collect(), + ); let block: runtime::Block = client::genesis::construct_genesis_block(state_root); storage.top.extend(additional_storage_with_genesis(&block)); @@ -164,7 +170,7 @@ pub type Client = client::Client< client::LocalCallExecutor< substrate_test_runtime::Block, B, - sc_executor::NativeExecutor + sc_executor::NativeExecutor, >, substrate_test_runtime::Block, substrate_test_runtime::RuntimeApi, @@ -217,12 +223,16 @@ pub trait TestClientBuilderExt: Sized { let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children_default + self.genesis_init_mut() + .extra_storage + .children_default .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.clone(), - }).data.insert(key, value.into()); + }) + .data + .insert(key, value.into()); self } @@ -244,27 +254,32 @@ pub trait TestClientBuilderExt: Sized { } /// Build the test client and longest chain selector. - fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain); + fn build_with_longest_chain( + self, + ) -> (Client, sc_consensus::LongestChain); /// Build the test client and the backend. fn build_with_backend(self) -> (Client, Arc); } -impl TestClientBuilderExt for TestClientBuilder< - client::LocalCallExecutor< - substrate_test_runtime::Block, +impl TestClientBuilderExt + for TestClientBuilder< + client::LocalCallExecutor< + substrate_test_runtime::Block, + B, + sc_executor::NativeExecutor, + >, B, - sc_executor::NativeExecutor - >, - B -> where + > where B: sc_client_api::backend::Backend + 'static, { fn genesis_init_mut(&mut self) -> &mut GenesisParameters { Self::genesis_init_mut(self) } - fn build_with_longest_chain(self) -> (Client, sc_consensus::LongestChain) { + fn build_with_longest_chain( + self, + ) -> (Client, sc_consensus::LongestChain) { self.build_with_native_executor(None) } @@ -275,7 +290,8 @@ impl TestClientBuilderExt for TestClientBuilder< } /// Type of optional fetch callback. -type MaybeFetcherCallback = Option Result + Send + Sync>>; +type MaybeFetcherCallback = + Option Result + Send + Sync>>; /// Type of fetcher future result. 
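`MaybeFetcherCallback` above is an optional boxed, thread-safe closure from request to result; `LightFetcher` invokes it when set and hits `unimplemented!()` otherwise. A standalone sketch of that shape with dummy request/response types:

```rust
// Optional boxed callback, in the shape of `MaybeFetcherCallback`.
type MaybeCallback<Req, Resp> =
    Option<Box<dyn Fn(Req) -> Result<Resp, String> + Send + Sync>>;

struct Fetcher {
    call: MaybeCallback<String, Vec<u8>>,
}

impl Fetcher {
    fn remote_call(&self, req: String) -> Result<Vec<u8>, String> {
        match self.call {
            Some(ref call) => call(req),
            None => unimplemented!(),
        }
    }
}

fn main() {
    let fetcher = Fetcher { call: Some(Box::new(|req: String| Ok(req.into_bytes()))) };
    assert_eq!(fetcher.remote_call("ping".into()).unwrap(), b"ping".to_vec());
}
```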
type FetcherFutureResult = futures::future::Ready>; @@ -284,7 +300,10 @@ type FetcherFutureResult = futures::future::Ready, Vec>, - body: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, } impl LightFetcher { @@ -293,21 +312,18 @@ impl LightFetcher { self, call: MaybeFetcherCallback, Vec>, ) -> Self { - LightFetcher { - call, - body: self.body, - } + LightFetcher { call, body: self.body } } /// Sets remote body callback. pub fn with_remote_body( self, - body: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, ) -> Self { - LightFetcher { - call: self.call, - body, - } + LightFetcher { call: self.call, body } } } @@ -315,14 +331,21 @@ impl Fetcher for LightFetcher { type RemoteHeaderResult = FetcherFutureResult; type RemoteReadResult = FetcherFutureResult, Option>>>; type RemoteCallResult = FetcherFutureResult>; - type RemoteChangesResult = FetcherFutureResult, u32)>>; + type RemoteChangesResult = + FetcherFutureResult, u32)>>; type RemoteBodyResult = FetcherFutureResult>; - fn remote_header(&self, _: RemoteHeaderRequest) -> Self::RemoteHeaderResult { + fn remote_header( + &self, + _: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult { unimplemented!() } - fn remote_read(&self, _: RemoteReadRequest) -> Self::RemoteReadResult { + fn remote_read( + &self, + _: RemoteReadRequest, + ) -> Self::RemoteReadResult { unimplemented!() } @@ -333,18 +356,27 @@ impl Fetcher for LightFetcher { unimplemented!() } - fn remote_call(&self, req: RemoteCallRequest) -> Self::RemoteCallResult { + fn remote_call( + &self, + req: RemoteCallRequest, + ) -> Self::RemoteCallResult { match self.call { Some(ref call) => futures::future::ready(call(req)), None => unimplemented!(), } } - fn remote_changes(&self, _: RemoteChangesRequest) -> Self::RemoteChangesResult { + fn remote_changes( + &self, + _: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { unimplemented!() } - fn remote_body(&self, req: RemoteBodyRequest) -> Self::RemoteBodyResult { + fn remote_body( + &self, + req: RemoteBodyRequest, + ) -> Self::RemoteBodyResult { match self.body { Some(ref body) => futures::future::ready(body(req)), None => unimplemented!(), @@ -359,10 +391,14 @@ pub fn new() -> Client { /// Creates new light client instance used for tests. 
pub fn new_light() -> ( - client::Client, + client::Client< + LightBackend, + LightExecutor, + substrate_test_runtime::Block, + substrate_test_runtime::RuntimeApi, + >, Arc, ) { - let storage = sc_client_db::light::LightStorage::new_test(); let blockchain = Arc::new(sc_light::Blockchain::new(storage)); let backend = Arc::new(LightBackend::new(blockchain)); @@ -372,11 +408,9 @@ pub fn new_light() -> ( executor, Box::new(sp_core::testing::TaskExecutor::new()), Default::default(), - ).expect("Creates LocalCallExecutor"); - let call_executor = LightExecutor::new( - backend.clone(), - local_call_executor, - ); + ) + .expect("Creates LocalCallExecutor"); + let call_executor = LightExecutor::new(backend.clone(), local_call_executor); ( TestClientBuilder::with_backend(backend.clone()) diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 797c7ec089bd6c8ecfd3e43f1ca590bf6f79d6c0..ef3555f704a623c2874e19a9cfc6ad8dbb87ade8 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -23,192 +23,169 @@ use std::sync::Arc; use crate::{ - AccountKeyring, ClientBlockImportExt, BlockBuilderExt, TestClientBuilder, TestClientBuilderExt, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, +}; +use futures::executor::block_on; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + backend, + blockchain::{Backend as BlockChainBackendT, HeaderBackend}, }; -use sc_client_api::backend; -use sc_client_api::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; use sp_consensus::BlockOrigin; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use substrate_test_runtime::{self, Transfer}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::Block as BlockT; -use sc_block_builder::BlockBuilderProvider; -use futures::executor::block_on; /// helper to test the `leaves` implementation for various backends -pub fn test_leaves_for_backend(backend: Arc) where +pub fn test_leaves_for_backend(backend: Arc) +where B: backend::Backend, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!( - blockchain.leaves().unwrap(), - vec![genesis_hash]); + assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a1.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a1.hash()],); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); #[allow(deprecated)] - assert_eq!( - blockchain.leaves().unwrap(), - vec![a2.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()],); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = 
client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a3.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a3.hash()],); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a4.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a4.hash()],); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()],); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b2.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()],); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b3.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()],); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()],); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: 
AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()],); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], - ); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()],); } /// helper to test the `children` implementation for various backends -pub fn test_children_for_backend(backend: Arc) where +pub fn test_children_for_backend(backend: Arc) +where B: backend::LocalBackend, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); @@ -218,98 +195,104 @@ pub fn test_children_for_backend(backend: Arc) where block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut 
builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); @@ -334,9 +317,9 @@ where { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 let mut client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); @@ -345,98 +328,104 @@ where block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - 
Default::default(), - false, - ).unwrap().build().unwrap().block; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); let b2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); let c3 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, c3)).unwrap(); // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); + 
builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); let d2 = builder.build().unwrap().block; block_on(client.import(BlockOrigin::Own, d2)).unwrap(); diff --git a/substrate/test-utils/runtime/src/genesismap.rs b/substrate/test-utils/runtime/src/genesismap.rs index 63c4bab55ec49b7ea1e0d037e41b7c74de99ea9e..a8801b8519dfecd576eac933a11265dd47e2869b 100644 --- a/substrate/test-utils/runtime/src/genesismap.rs +++ b/substrate/test-utils/runtime/src/genesismap.rs @@ -17,14 +17,17 @@ //! Tool for creating the genesis block. -use std::collections::BTreeMap; +use super::{system, wasm_binary_unwrap, AccountId, AuthorityId}; +use codec::{Encode, Joiner, KeyedVec}; +use sc_service::client::genesis; +use sp_core::{ + map, + storage::{well_known_keys, Storage}, + ChangesTrieConfiguration, +}; use sp_io::hashing::{blake2_256, twox_128}; -use super::{AuthorityId, AccountId, wasm_binary_unwrap, system}; -use codec::{Encode, KeyedVec, Joiner}; -use sp_core::{ChangesTrieConfiguration, map}; -use sp_core::storage::{well_known_keys, Storage}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; -use sc_service::client::genesis; +use std::collections::BTreeMap; /// Configuration of a general Substrate test genesis block. pub struct GenesisConfig { @@ -47,7 +50,7 @@ impl GenesisConfig { ) -> Self { GenesisConfig { changes_trie_config, - authorities: authorities, + authorities, balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, extra_storage, @@ -56,16 +59,23 @@ impl GenesisConfig { pub fn genesis_map(&self) -> Storage { let wasm_runtime = wasm_binary_unwrap().to_vec(); - let mut map: BTreeMap, Vec> = self.balances.iter() - .map(|&(ref account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance))) + let mut map: BTreeMap, Vec> = self + .balances + .iter() + .map(|&(ref account, balance)| { + (account.to_keyed_vec(b"balance:"), vec![].and(&balance)) + }) .map(|(k, v)| (blake2_256(&k[..])[..].to_vec(), v.to_vec())) - .chain(vec![ - (well_known_keys::CODE.into(), wasm_runtime), - ( - well_known_keys::HEAP_PAGES.into(), - vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), - ), - ].into_iter()) + .chain( + vec![ + (well_known_keys::CODE.into(), wasm_runtime), + ( + well_known_keys::HEAP_PAGES.into(), + vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), + ), + ] + .into_iter(), + ) .collect(); if let Some(ref changes_trie_config) = self.changes_trie_config { map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); @@ -75,28 +85,30 @@ impl GenesisConfig { map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
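// A minimal, std-only sketch of the storage scheme that `genesis_map` (above)
// builds, under the simplified assumption of 4-byte account ids: each endowed
// account becomes one entry whose key is blake2_256(b"balance:" ++ account)
// and whose value is the SCALE-encoded balance (for a u64 that is its 8-byte
// little-endian form). The hashing step is only described here, not performed.
fn to_keyed_vec(prefix: &[u8], key: &[u8]) -> Vec<u8> {
    // Mirrors `KeyedVec`: namespace the raw key bytes with a prefix.
    let mut v = prefix.to_vec();
    v.extend_from_slice(key);
    v
}

fn main() {
    use std::collections::BTreeMap;
    let endowed: Vec<([u8; 4], u64)> = vec![(*b"alic", 1 << 60), (*b"bobb", 1 << 60)];
    let map: BTreeMap<Vec<u8>, Vec<u8>> = endowed
        .iter()
        .map(|(account, balance)| {
            // The real code hashes this key with blake2_256 before inserting.
            (to_keyed_vec(b"balance:", account), balance.to_le_bytes().to_vec())
        })
        .collect();
    assert_eq!(map.len(), 2);
    // `HEAP_PAGES` falls back to 16 when no override is configured, matching
    // `self.heap_pages_override.unwrap_or(16)` above.
    let heap_pages_override: Option<u64> = None;
    assert_eq!(heap_pages_override.unwrap_or(16), 16);
}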
- let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()}; + let mut storage = + Storage { top: map, children_default: self.extra_storage.children_default.clone() }; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); - config.assimilate_storage(&mut storage).expect("Adding `system::GenesisConfig` to the genesis"); + config + .assimilate_storage(&mut storage) + .expect("Adding `system::GenesisConfig` to the genesis"); storage } } -pub fn insert_genesis_block( - storage: &mut Storage, -) -> sp_core::hash::H256 { +pub fn insert_genesis_block(storage: &mut Storage) -> sp_core::hash::H256 { let child_roots = storage.children_default.iter().map(|(sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect(), - ); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + ); (sk.clone(), state_root.encode()) }); // add child roots to storage storage.top.extend(child_roots); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().collect() + storage.top.clone().into_iter().collect(), ); let block: crate::Block = genesis::construct_genesis_block(state_root); let genesis_hash = block.header.hash(); diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index f4c722ab12c2bf023a3e3de98a4e67d89756e49e..62aa28d4260ac8bbc0c37569e47bde6497874ca2 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -23,45 +23,43 @@ pub mod genesismap; pub mod system; -use sp_std::{prelude::*, marker::PhantomData}; -use codec::{Encode, Decode, Input, Error}; +use codec::{Decode, Encode, Error, Input}; +use sp_std::{marker::PhantomData, prelude::*}; +use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; -use sp_application_crypto::{ed25519, sr25519, ecdsa, RuntimeAppPublic}; -use trie_db::{TrieMut, Trie}; -use sp_trie::{PrefixedMemoryDB, StorageProof}; -use sp_trie::trie_types::{TrieDB, TrieDBMut}; +use sp_trie::{ + trie_types::{TrieDB, TrieDBMut}, + PrefixedMemoryDB, StorageProof, +}; +use trie_db::{Trie, TrieMut}; +use cfg_if::cfg_if; +use frame_support::{parameter_types, traits::KeyOwnerProofSystem, weights::RuntimeDbWeight}; +use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; +pub use sp_core::hash::H256; +use sp_inherents::{CheckInherentsResult, InherentData}; +#[cfg(feature = "std")] +use sp_runtime::traits::NumberFor; use sp_runtime::{ create_runtime_str, impl_opaque_keys, - ApplyExtrinsicResult, Perbill, - transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, - }, traits::{ - BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, Verify, IdentityLookup, + BlakeTwo256, BlindCheckable, Block as BlockT, Extrinsic as ExtrinsicT, GetNodeBlockType, + GetRuntimeBlockType, IdentityLookup, Verify, }, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, + }, + ApplyExtrinsicResult, Perbill, }; -#[cfg(feature = "std")] -use sp_runtime::traits::NumberFor; -use 
sp_version::RuntimeVersion; -pub use sp_core::hash::H256; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use frame_support::{ - parameter_types, - traits::KeyOwnerProofSystem, - weights::RuntimeDbWeight, -}; -use frame_system::limits::{BlockWeights, BlockLength}; -use sp_inherents::{CheckInherentsResult, InherentData}; -use cfg_if::cfg_if; +use sp_version::RuntimeVersion; // Ensure Babe and Aura use the same crypto to simplify things a bit. -pub use sp_consensus_babe::{AuthorityId, Slot, AllowedSlots}; +pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; @@ -77,18 +75,19 @@ pub mod wasm_binary_logging_disabled { /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ - supported with the flag disabled.") + WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only \ + supported with the flag disabled.", + ) } /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_logging_disabled_unwrap() -> &'static [u8] { - wasm_binary_logging_disabled::WASM_BINARY - .expect( - "Development wasm binary is not available. Testing is only supported with the flag \ - disabled." - ) + wasm_binary_logging_disabled::WASM_BINARY.expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled.", + ) } /// Test runtime version. @@ -110,10 +109,7 @@ fn version() -> RuntimeVersion { /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } /// Calls in transactions. 
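// The `into_signed_tx` hunks below reformat a three-step pattern: SCALE-encode
// the `Transfer`, sign the encoding with the sender's keyring pair, and carry
// transfer plus signature together in `Extrinsic::Transfer` so `check` can
// verify them later. A std-only sketch of that round trip; `toy_sign` is a
// plain checksum standing in for the real sr25519 signature, and the
// fixed-width encoding below is a stand-in for SCALE.
struct Transfer {
    from: u8,
    to: u8,
    amount: u64,
    nonce: u64,
}

impl Transfer {
    fn encode(&self) -> Vec<u8> {
        let mut out = vec![self.from, self.to];
        out.extend_from_slice(&self.amount.to_le_bytes());
        out.extend_from_slice(&self.nonce.to_le_bytes());
        out
    }
}

// Deterministic toy "signature" so verification can simply recompute it.
fn toy_sign(payload: &[u8]) -> u64 {
    payload.iter().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64))
}

fn toy_verify(payload: &[u8], sig: u64) -> bool {
    toy_sign(payload) == sig
}

fn main() {
    let transfer = Transfer { from: 1, to: 2, amount: 69, nonce: 0 };
    let signature = toy_sign(&transfer.encode());
    // `check` accepts the extrinsic only while bytes and signature agree.
    assert!(toy_verify(&transfer.encode(), signature));
    let tampered = Transfer { amount: 70, ..transfer };
    assert!(!toy_verify(&tampered.encode(), signature)); // `BadProof` in the real code
}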
@@ -130,12 +126,10 @@ impl Transfer { #[cfg(feature = "std")] pub fn into_signed_tx(self) -> Extrinsic { let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: false, - } + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { transfer: self, signature, exhaust_resources_when_not_first: false } } /// Convert into a signed extrinsic, which will only end up included in the block @@ -144,12 +138,10 @@ impl Transfer { #[cfg(feature = "std")] pub fn into_resources_exhausting_tx(self) -> Extrinsic { let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: true, - } + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { transfer: self, signature, exhaust_resources_when_not_first: true } } } @@ -174,7 +166,10 @@ parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinsic does n #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { self.using_encoded(|bytes| seq.serialize_bytes(bytes)) } } @@ -185,21 +180,22 @@ impl BlindCheckable for Extrinsic { fn check(self) -> Result { match self { Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { - Ok(Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first }) + Ok(Extrinsic::Transfer { + transfer, + signature, + exhaust_resources_when_not_first, + }) } else { Err(InvalidTransaction::BadProof.into()) - } - }, + }, Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::ChangesTrieConfigUpdate(new_config) => Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), - Extrinsic::OffchainIndexSet(key, value) => - Ok(Extrinsic::OffchainIndexSet(key, value)), - Extrinsic::OffchainIndexClear(key) => - Ok(Extrinsic::OffchainIndexClear(key)), + Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), + Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), } } @@ -301,9 +297,7 @@ impl codec::EncodeLike for DecodeFails {} impl DecodeFails { /// Create a new instance. 
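// A short sketch of the `DecodeFails` idea whose constructor follows: a type
// whose SCALE decoding always errors, useful for exercising failure paths in
// API tests. It assumes the `parity-scale-codec` crate under the `codec`
// alias used throughout this file; the concrete error message is invented.
use codec::{Decode, Error, Input};

struct AlwaysFails;

impl Decode for AlwaysFails {
    fn decode<I: Input>(_input: &mut I) -> Result<Self, Error> {
        // Refuse every input, regardless of its contents.
        Err("AlwaysFails refuses to decode".into())
    }
}

fn main() {
    let bytes = [0u8; 4];
    assert!(AlwaysFails::decode(&mut &bytes[..]).is_err());
}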
pub fn new() -> DecodeFails { - DecodeFails { - _phantom: Default::default(), - } + DecodeFails { _phantom: Default::default() } } } @@ -619,7 +613,8 @@ fn code_using_trie() -> u64 { let pairs = [ (b"0103000000000000000464".to_vec(), b"0400000000".to_vec()), (b"0103000000000000000469".to_vec(), b"0401000000".to_vec()), - ].to_vec(); + ] + .to_vec(); let mut mdb = PrefixedMemoryDB::default(); let mut root = sp_std::default::Default::default(); @@ -627,10 +622,10 @@ fn code_using_trie() -> u64 { let v = &pairs; let mut t = TrieDBMut::::new(&mut mdb, &mut root); for i in 0..v.len() { - let key: &[u8]= &v[i].0; + let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; if !t.insert(key, val).is_ok() { - return 101; + return 101 } } t @@ -645,8 +640,12 @@ fn code_using_trie() -> u64 { } } iter_pairs.len() as u64 - } else { 102 } - } else { 103 } + } else { + 102 + } + } else { + 103 + } } impl_opaque_keys! { @@ -1206,29 +1205,15 @@ fn test_read_storage() { fn test_read_child_storage() { const STORAGE_KEY: &[u8] = b"unique_id_1"; const KEY: &[u8] = b":read_child_storage"; - sp_io::default_child_storage::set( - STORAGE_KEY, - KEY, - b"test", - ); + sp_io::default_child_storage::set(STORAGE_KEY, KEY, b"test"); let mut v = [0u8; 4]; - let r = sp_io::default_child_storage::read( - STORAGE_KEY, - KEY, - &mut v, - 0, - ); + let r = sp_io::default_child_storage::read(STORAGE_KEY, KEY, &mut v, 0); assert_eq!(r, Some(4)); assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::default_child_storage::read( - STORAGE_KEY, - KEY, - &mut v, - 8, - ); + let r = sp_io::default_child_storage::read(STORAGE_KEY, KEY, &mut v, 8); assert_eq!(r, Some(0)); assert_eq!(&v, &[0, 0, 0, 0]); } @@ -1236,10 +1221,7 @@ fn test_read_child_storage() { fn test_witness(proof: StorageProof, root: crate::Hash) { use sp_externalities::Externalities; let db: sp_trie::MemoryDB = proof.into_memory_db(); - let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new( - db, - root, - ); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let mut overlay = sp_state_machine::OverlayedChanges::default(); let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); let mut ext = sp_state_machine::Ext::new( @@ -1259,18 +1241,16 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { #[cfg(test)] mod tests { - use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - DefaultTestClientBuilderExt, TestClientBuilder, - runtime::TestAPI, - }; + use codec::Encode; + use sc_block_builder::BlockBuilderProvider; use sp_api::ProvideRuntimeApi; - use sp_runtime::generic::BlockId; use sp_core::storage::well_known_keys::HEAP_PAGES; + use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; - use codec::Encode; - use sc_block_builder::BlockBuilderProvider; + use substrate_test_runtime_client::{ + prelude::*, runtime::TestAPI, sp_consensus::BlockOrigin, DefaultTestClientBuilderExt, + TestClientBuilder, + }; #[test] fn heap_pages_is_respected() { @@ -1307,9 +1287,8 @@ mod tests { #[test] fn test_storage() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build(); + let client = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -1331,14 +1310,10 @@ mod tests { #[test] fn witness_backend_works() { let (db, root) = witness_backend(); - let backend = 
sp_state_machine::TrieBackend::<_, crate::Hashing>::new( - db, - root, - ); + let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let proof = sp_state_machine::prove_read(backend, vec![b"value3"]).unwrap(); - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build(); + let client = + TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); diff --git a/substrate/test-utils/runtime/src/system.rs b/substrate/test-utils/runtime/src/system.rs index c4b88c09e8d23db7b8721a917b3990940a15323e..316a553ed027d469f4eb986de60873612b1c05a6 100644 --- a/substrate/test-utils/runtime/src/system.rs +++ b/substrate/test-utils/runtime/src/system.rs @@ -18,25 +18,27 @@ //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. -use sp_std::prelude::*; +use crate::{ + AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, +}; +use codec::{Decode, Encode, KeyedVec}; +use frame_support::{decl_module, decl_storage, storage}; +use frame_system::Config; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ - storage::root as storage_root, storage::changes_root as storage_changes_root, - hashing::blake2_256, trie, + hashing::blake2_256, + storage::{changes_root as storage_changes_root, root as storage_root}, + trie, }; -use frame_support::storage; -use frame_support::{decl_storage, decl_module}; use sp_runtime::{ - traits::Header as _, generic, ApplyExtrinsicResult, + generic, + traits::Header as _, transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionValidityError, + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, + ApplyExtrinsicResult, }; -use codec::{KeyedVec, Encode, Decode}; -use frame_system::Config; -use crate::{ - AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId -}; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; @@ -159,17 +161,17 @@ impl frame_support::traits::ExecuteBlock for BlockExecutor { /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { - return InvalidTransaction::Future.into(); + return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); @@ -181,20 +183,14 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { let provides = vec![encode(&tx.from, tx.nonce)]; - Ok(ValidTransaction { - priority: tx.amount, - requires, - provides, - longevity: 64, - propagate: true, - }) + Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. 
/// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { - let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX) - .unwrap_or_default(); + let extrinsic_index: u32 = + storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); @@ -215,8 +211,8 @@ pub fn finalize_block() -> Header { // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. - let storage_root = Hash::decode(&mut &storage_root()[..]) - .expect("`storage_root` is a valid hash"); + let storage_root = + Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); @@ -231,17 +227,11 @@ pub fn finalize_block() -> Header { if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(new_config) + generic::ChangesTrieSignal::NewConfiguration(new_config), )); } - Header { - number, - extrinsics_root, - state_root: storage_root, - parent_hash, - digest, - } + Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] @@ -253,12 +243,11 @@ fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { - Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => + Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } + if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), - Extrinsic::Transfer { ref transfer, .. } => - execute_transfer_backend(transfer), - Extrinsic::AuthoritiesChange(ref new_auth) => - execute_new_authorities_backend(new_auth), + Extrinsic::Transfer { ref transfer, .. 
} => execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), @@ -271,9 +260,8 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx Extrinsic::OffchainIndexClear(key) => { sp_io::offchain_index::clear(&key); Ok(Ok(())) - } - Extrinsic::Store(data) => - execute_store(data.clone()), + }, + Extrinsic::Store(data) => execute_store(data.clone()), } } @@ -282,7 +270,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if !(tx.nonce == expected_nonce) { - return Err(InvalidTransaction::Stale.into()); + return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage @@ -294,7 +282,7 @@ fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // enact transfer if !(tx.amount <= from_balance) { - return Err(InvalidTransaction::Payment.into()); + return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); @@ -323,12 +311,12 @@ fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicRes Ok(Ok(())) } -fn execute_changes_trie_config_update(new_config: Option) -> ApplyExtrinsicResult { +fn execute_changes_trie_config_update( + new_config: Option, +) -> ApplyExtrinsicResult { match new_config.clone() { - Some(new_config) => storage::unhashed::put_raw( - well_known_keys::CHANGES_TRIE_CONFIG, - &new_config.encode(), - ), + Some(new_config) => + storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } ::put(new_config); @@ -360,19 +348,18 @@ fn info_expect_equal_hash(given: &Hash, expected: &Hash) { mod tests { use super::*; - use sp_io::TestExternalities; + use crate::{wasm_binary_unwrap, Header, Transfer}; + use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; + use sp_core::{ + map, + traits::{CodeExecutor, RuntimeCode}, + NeverNativeValue, + }; + use sp_io::{hashing::twox_128, TestExternalities}; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; - use crate::{Header, Transfer, wasm_binary_unwrap}; - use sp_core::{NeverNativeValue, map, traits::{CodeExecutor, RuntimeCode}}; - use sc_executor::{NativeExecutor, WasmExecutionMethod, native_executor_instance}; - use sp_io::hashing::twox_128; // Declare an instance of the native executor dispatch for the test runtime. 
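// The `validate_transaction` and `execute_transfer_backend` hunks above
// combine three rules: a nonce below the account's expected one is `Stale`, a
// nonce more than 64 ahead is `Future`, and execution additionally requires an
// exact nonce match plus sufficient funds. A std-only model of those rules,
// assuming u64 account ids in place of the runtime's `AccountId`:
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Invalid {
    Stale,
    Future,
    Payment,
}

struct State {
    nonces: HashMap<u64, u64>,
    balances: HashMap<u64, u64>,
}

impl State {
    // Pool-side check: accept anything inside the 64-nonce window.
    fn validate(&self, from: u64, nonce: u64) -> Result<(), Invalid> {
        let expected = *self.nonces.get(&from).unwrap_or(&0);
        if nonce < expected {
            return Err(Invalid::Stale)
        }
        if nonce > expected + 64 {
            return Err(Invalid::Future)
        }
        Ok(())
    }

    // Execution-side: exact nonce, funds check, then nonce bump and transfer.
    fn execute(&mut self, from: u64, to: u64, amount: u64, nonce: u64) -> Result<(), Invalid> {
        let expected = *self.nonces.get(&from).unwrap_or(&0);
        if nonce != expected {
            return Err(Invalid::Stale)
        }
        let from_balance = *self.balances.get(&from).unwrap_or(&0);
        if amount > from_balance {
            return Err(Invalid::Payment)
        }
        self.nonces.insert(from, expected + 1);
        self.balances.insert(from, from_balance - amount);
        *self.balances.entry(to).or_insert(0) += amount;
        Ok(())
    }
}

fn main() {
    let mut s = State { nonces: HashMap::new(), balances: HashMap::from([(1, 100)]) };
    assert_eq!(s.validate(1, 65), Err(Invalid::Future)); // beyond expected + 64
    assert_eq!(s.execute(1, 2, 69, 0), Ok(()));
    assert_eq!(s.execute(1, 2, 69, 0), Err(Invalid::Stale)); // nonce replayed
    assert_eq!(s.execute(1, 2, 69, 1), Err(Invalid::Payment)); // only 31 left
}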
- native_executor_instance!( - NativeDispatch, - crate::api::dispatch, - crate::native_version - ); + native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) @@ -382,7 +369,7 @@ mod tests { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), - Sr25519Keyring::Charlie.to_raw_public() + Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), @@ -399,7 +386,10 @@ mod tests { ) } - fn block_import_works(block_executor: F) where F: Fn(Block, &mut TestExternalities) { + fn block_import_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { let h = Header { parent_hash: [69u8; 32].into(), number: 1, @@ -407,10 +397,7 @@ mod tests { extrinsics_root: Default::default(), digest: Default::default(), }; - let mut b = Block { - header: h, - extrinsics: vec![], - }; + let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); @@ -419,7 +406,11 @@ mod tests { #[test] fn block_import_works_native() { - block_import_works(|b, ext| ext.execute_with(|| { execute_block(b); })); + block_import_works(|b, ext| { + ext.execute_with(|| { + execute_block(b); + }) + }); } #[test] @@ -432,19 +423,23 @@ mod tests { heap_pages: None, }; - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ).0.unwrap(); + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); }) } fn block_import_with_transaction_works(block_executor: F) - where F: Fn(Block, &mut TestExternalities) + where + F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { @@ -454,14 +449,13 @@ mod tests { extrinsics_root: Default::default(), digest: Default::default(), }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 69, - nonce: 0, - }.into_signed_tx() - ], + extrinsics: vec![Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 69, + nonce: 0, + } + .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); @@ -481,13 +475,15 @@ mod tests { to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, - }.into_signed_tx(), + } + .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, - }.into_signed_tx(), + } + .into_signed_tx(), ], }; @@ -519,7 +515,11 @@ mod tests { #[test] fn block_import_with_transaction_works_native() { - block_import_with_transaction_works(|b, ext| ext.execute_with(|| { execute_block(b); })); + block_import_with_transaction_works(|b, ext| { + ext.execute_with(|| { + execute_block(b); + }) + }); } #[test] @@ -532,14 +532,17 @@ mod tests { heap_pages: None, }; - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ).0.unwrap(); + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); }) } } diff --git a/substrate/test-utils/runtime/transaction-pool/src/lib.rs b/substrate/test-utils/runtime/transaction-pool/src/lib.rs index b3717d22a8bed02211b71166389dacc2ffc11a3f..d0cd50394c533514885114b6a061ae2ded73a14c 100644 --- a/substrate/test-utils/runtime/transaction-pool/src/lib.rs +++ 
b/substrate/test-utils/runtime/transaction-pool/src/lib.rs @@ -20,22 +20,22 @@ //! See [`TestApi`] for more information. use codec::Encode; +use futures::future::ready; use parking_lot::RwLock; +use sp_blockchain::CachedHeaderMetadata; use sp_runtime::{ generic::{self, BlockId}, - traits::{BlakeTwo256, Hash as HashT, Block as BlockT, Header as _}, + traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as _}, transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, }, }; -use std::collections::{HashSet, HashMap, BTreeMap}; +use std::collections::{BTreeMap, HashMap, HashSet}; use substrate_test_runtime_client::{ - runtime::{Index, AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Transfer}, + runtime::{AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Index, Transfer}, AccountKeyring::{self, *}, }; -use sp_blockchain::CachedHeaderMetadata; -use futures::future::ready; /// Error type used by [`TestApi`]. #[derive(Debug, derive_more::From, derive_more::Display)] @@ -130,12 +130,9 @@ impl TestApi { block_number .checked_sub(1) .and_then(|num| { - chain.block_by_number - .get(&num) - .map(|blocks| { - blocks[0].0.header.hash() - }) - }).unwrap_or_default() + chain.block_by_number.get(&num).map(|blocks| blocks[0].0.header.hash()) + }) + .unwrap_or_default() }; self.push_block_with_parent(parent_hash, xts, is_best_block) @@ -154,7 +151,9 @@ impl TestApi { let block_number = if parent == Hash::default() { 0 } else { - *self.chain.read() + *self + .chain + .read() .block_by_hash .get(&parent) .expect("`parent` exists") @@ -182,7 +181,11 @@ impl TestApi { let mut chain = self.chain.write(); chain.block_by_hash.insert(hash, block.clone()); - chain.block_by_number.entry(block_number).or_default().push((block, is_best_block.into())); + chain + .block_by_number + .entry(block_number) + .or_default() + .push((block, is_best_block.into())); } fn hash_and_length_inner(ex: &Extrinsic) -> (Hash, usize) { @@ -195,9 +198,7 @@ impl TestApi { /// Next time transaction pool will try to validate this /// extrinsic, api will return invalid result. pub fn add_invalid(&self, xts: &Extrinsic) { - self.chain.write().invalid_hashes.insert( - Self::hash_and_length_inner(xts).0 - ); + self.chain.write().invalid_hashes.insert(Self::hash_and_length_inner(xts).0); } /// Query validation requests received. @@ -242,7 +243,8 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { match self.block_id_to_number(at) { Ok(Some(number)) => { - let found_best = self.chain + let found_best = self + .chain .read() .block_by_number .get(&number) @@ -253,24 +255,24 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { // the transaction. (This is not required for this test function, but in real // environment it would fail because of this). 
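// The tagging logic further down in this hunk builds the pool's dependency
// graph: a transfer always `provides` its own nonce tag, and when it is ahead
// of the account's on-chain nonce it `requires` the predecessor's tag, so it
// stays queued until some transaction provides that tag. A std-only sketch,
// assuming single-byte nonce tags exactly as in the code below:
fn tags(chain_nonce: u8, tx_nonce: u8) -> (Vec<Vec<u8>>, Vec<Vec<u8>>) {
    let requires = if chain_nonce == tx_nonce { vec![] } else { vec![vec![chain_nonce]] };
    let provides = vec![vec![tx_nonce]];
    (requires, provides)
}

fn main() {
    // Ready immediately: the transaction uses the account's expected nonce.
    assert_eq!(tags(0, 0), (vec![], vec![vec![0]]));
    // Gapped: nonce 1 must wait until some transaction provides tag [0].
    let (requires, provides) = tags(0, 1);
    assert_eq!(requires, vec![vec![0]]);
    assert_eq!(provides, vec![vec![1]]);
}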
if !found_best { - return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(1)).into()) - )) + return ready(Ok(Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(1), + ) + .into()))) } }, - Ok(None) => return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(2)).into()) - )), + Ok(None) => + return ready(Ok(Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(2), + ) + .into()))), Err(e) => return ready(Err(e)), } let (requires, provides) = if let Some(transfer) = uxt.try_transfer() { let chain_nonce = self.chain.read().nonces.get(&transfer.from).cloned().unwrap_or(0); - let requires = if chain_nonce == transfer.nonce { - vec![] - } else { - vec![vec![chain_nonce as u8]] - }; + let requires = + if chain_nonce == transfer.nonce { vec![] } else { vec![vec![chain_nonce as u8]] }; let provides = vec![vec![transfer.nonce as u8]]; (requires, provides) @@ -279,18 +281,13 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { }; if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into()) - )) + return ready(Ok(Err( + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into() + ))) } - let mut validity = ValidTransaction { - priority: 1, - requires, - provides, - longevity: 64, - propagate: true, - }; + let mut validity = + ValidTransaction { priority: 1, requires, provides, longevity: 64, propagate: true }; (self.valid_modifier.read())(&mut validity); @@ -302,11 +299,8 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { at: &BlockId, ) -> Result>, Error> { Ok(match at { - generic::BlockId::Hash(x) => self.chain - .read() - .block_by_hash - .get(x) - .map(|b| *b.header.number()), + generic::BlockId::Hash(x) => + self.chain.read().block_by_hash.get(x).map(|b| *b.header.number()), generic::BlockId::Number(num) => Some(*num), }) } @@ -317,11 +311,10 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { ) -> Result>, Error> { Ok(match at { generic::BlockId::Hash(x) => Some(x.clone()), - generic::BlockId::Number(num) => self.chain - .read() - .block_by_number - .get(num) - .and_then(|blocks| blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash())), + generic::BlockId::Number(num) => + self.chain.read().block_by_number.get(num).and_then(|blocks| { + blocks.iter().find(|b| b.1.is_best()).map(|b| b.0.header().hash()) + }), }) } @@ -334,16 +327,10 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { fn block_body(&self, id: &BlockId) -> Self::BodyFuture { futures::future::ready(Ok(match id { - BlockId::Number(num) => self.chain - .read() - .block_by_number - .get(num) - .map(|b| b[0].0.extrinsics().to_vec()), - BlockId::Hash(hash) => self.chain - .read() - .block_by_hash - .get(hash) - .map(|b| b.extrinsics().to_vec()), + BlockId::Number(num) => + self.chain.read().block_by_number.get(num).map(|b| b[0].0.extrinsics().to_vec()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.extrinsics().to_vec()), })) } @@ -352,16 +339,10 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { at: &BlockId, ) -> Result::Header>, Self::Error> { Ok(match at { - BlockId::Number(num) => self.chain - .read() - .block_by_number - .get(num) - .map(|b| b[0].0.header().clone()), - BlockId::Hash(hash) => self.chain - .read() - .block_by_hash - .get(hash) - .map(|b| b.header().clone()), + BlockId::Number(num) => + 
self.chain.read().block_by_number.get(num).map(|b| b[0].0.header().clone()), + BlockId::Hash(hash) => + self.chain.read().block_by_hash.get(hash).map(|b| b.header().clone()), }) } } @@ -369,21 +350,14 @@ impl sc_transaction_pool::test_helpers::ChainApi for TestApi { impl sp_blockchain::HeaderMetadata for TestApi { type Error = Error; - fn header_metadata( - &self, - hash: Hash, - ) -> Result, Self::Error> { + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { let chain = self.chain.read(); let block = chain.block_by_hash.get(&hash).expect("Hash exists"); Ok(block.header().into()) } - fn insert_header_metadata( - &self, - _: Hash, - _: CachedHeaderMetadata, - ) { + fn insert_header_metadata(&self, _: Hash, _: CachedHeaderMetadata) { unimplemented!("Not implemented for tests") } @@ -396,12 +370,7 @@ impl sp_blockchain::HeaderMetadata for TestApi { /// /// Part of the test api. pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { - let transfer = Transfer { - from: who.into(), - to: AccountId::default(), - nonce, - amount: 1, - }; + let transfer = Transfer { from: who.into(), to: AccountId::default(), nonce, amount: 1 }; let signature = transfer.using_encoded(|e| who.sign(e)).into(); Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: false } } diff --git a/substrate/test-utils/src/lib.rs b/substrate/test-utils/src/lib.rs index b3a0f322a639f920c1880460fa7db7bcaf1087bc..eef87a29ca07e4ae498b1b6a62f4b6ceceb1c825 100644 --- a/substrate/test-utils/src/lib.rs +++ b/substrate/test-utils/src/lib.rs @@ -24,7 +24,7 @@ pub use futures; /// /// # Requirements /// -/// You must have tokio in the `[dev-dependencies]` of your crate to use this macro. +/// You must have tokio in the `[dev-dependencies]` of your crate to use this macro. /// /// # Example /// @@ -64,7 +64,7 @@ macro_rules! assert_eq_uvec { ( $x:expr, $y:expr $(,)? ) => { $crate::__assert_eq_uvec!($x, $y); $crate::__assert_eq_uvec!($y, $x); - } + }; } #[macro_export] @@ -72,7 +72,9 @@ macro_rules! assert_eq_uvec { macro_rules! __assert_eq_uvec { ( $x:expr, $y:expr ) => { $x.iter().for_each(|e| { - if !$y.contains(e) { panic!("vectors not equal: {:?} != {:?}", $x, $y); } + if !$y.contains(e) { + panic!("vectors not equal: {:?} != {:?}", $x, $y); + } }); - } + }; } diff --git a/substrate/test-utils/test-runner/src/client.rs b/substrate/test-utils/test-runner/src/client.rs index 4cadfe58c605acf1cd260d664c506673eddadcc0..71a156b8bc0d908ea0f7af8a71aab480b73ed651 100644 --- a/substrate/test-utils/test-runner/src/client.rs +++ b/substrate/test-utils/test-runner/src/client.rs @@ -16,204 +16,218 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . //! 
Client parts -use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use sp_consensus_babe::BabeApi; -use crate::{ChainInfo, default_config}; -use manual_seal::consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider}; -use sp_keyring::sr25519::Keyring::Alice; -use std::str::FromStr; -use sp_runtime::traits::Header; +use crate::{default_config, ChainInfo}; use futures::channel::mpsc; use jsonrpc_core::MetaIoHandler; -use manual_seal::{run_manual_seal, EngineCommand, ManualSealParams, import_queue, rpc::{ManualSeal, ManualSealApi}}; +use manual_seal::{ + consensus::babe::{BabeConsensusDataProvider, SlotTimestampProvider}, + import_queue, + rpc::{ManualSeal, ManualSealApi}, + run_manual_seal, EngineCommand, ManualSealParams, +}; use sc_client_api::backend::Backend; use sc_service::{ - build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams, TFullBackend, - TFullClient, TaskManager, new_full_parts, Configuration, ChainSpec, TaskExecutor, + build_network, new_full_parts, spawn_tasks, BuildNetworkParams, ChainSpec, Configuration, + SpawnTasksParams, TFullBackend, TFullClient, TaskExecutor, TaskManager, }; use sc_transaction_pool::BasicPool; use sc_transaction_pool_api::TransactionPool; use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata}; use sp_block_builder::BlockBuilder; -use sp_runtime::traits::Block as BlockT; -use sp_session::SessionKeys; +use sp_consensus_babe::BabeApi; +use sp_keyring::sr25519::Keyring::Alice; use sp_offchain::OffchainWorkerApi; -use std::sync::Arc; +use sp_runtime::traits::{Block as BlockT, Header}; +use sp_session::SessionKeys; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use std::{str::FromStr, sync::Arc}; type ClientParts = ( - Arc>, - TaskManager, - Arc::Block, ::RuntimeApi, ::Executor>>, - Arc::Block, - Hash = <::Block as BlockT>::Hash, - Error = sc_transaction_pool::error::Error, - InPoolTransaction = sc_transaction_pool::Transaction< - <::Block as BlockT>::Hash, - <::Block as BlockT>::Extrinsic, - >, - >>, - mpsc::Sender::Block as BlockT>::Hash>>, - Arc::Block>>, + Arc>, + TaskManager, + Arc< + TFullClient< + ::Block, + ::RuntimeApi, + ::Executor, + >, + >, + Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, + >, + >, + mpsc::Sender::Block as BlockT>::Hash>>, + Arc::Block>>, ); /// Provide the config or chain spec for a given chain pub enum ConfigOrChainSpec { - /// Configuration object - Config(Configuration), - /// Chain spec object - ChainSpec(Box, TaskExecutor) + /// Configuration object + Config(Configuration), + /// Chain spec object + ChainSpec(Box, TaskExecutor), } /// Creates all the client parts you need for [`Node`](crate::node::Node) -pub fn client_parts(config_or_chain_spec: ConfigOrChainSpec) -> Result, sc_service::Error> - where - T: ChainInfo + 'static, - >>::RuntimeApi: - Core + Metadata + OffchainWorkerApi + SessionKeys - + TaggedTransactionQueue + BlockBuilder + BabeApi - + ApiExt as Backend>::State>, - ::Call: From>, - <::Block as BlockT>::Hash: FromStr, - <<::Block as BlockT>::Header as Header>::Number: num_traits::cast::AsPrimitive, +pub fn client_parts( + config_or_chain_spec: ConfigOrChainSpec, +) -> Result, sc_service::Error> +where + T: ChainInfo + 'static, + , + >>::RuntimeApi: Core + + Metadata + + OffchainWorkerApi + + SessionKeys + + TaggedTransactionQueue + + BlockBuilder + + BabeApi 
+ + ApiExt as Backend>::State>, + ::Call: From>, + <::Block as BlockT>::Hash: FromStr, + <<::Block as BlockT>::Header as Header>::Number: + num_traits::cast::AsPrimitive, { - use sp_consensus_babe::AuthorityId; - let config = match config_or_chain_spec { - ConfigOrChainSpec::Config(config) => config, - ConfigOrChainSpec::ChainSpec(chain_spec, task_executor) => { - default_config(task_executor, chain_spec) - }, - }; - - let (client, backend, keystore, mut task_manager) = - new_full_parts::(&config, None)?; - let client = Arc::new(client); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let (grandpa_block_import, ..) = - grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone(), None)?; - - let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?; - let (block_import, babe_link) = sc_consensus_babe::block_import( - slot_duration.clone(), - grandpa_block_import, - client.clone(), - )?; - - let consensus_data_provider = BabeConsensusDataProvider::new( - client.clone(), - keystore.sync_keystore(), - babe_link.epoch_changes().clone(), - vec![(AuthorityId::from(Alice.public()), 1000)], - ) - .expect("failed to create ConsensusDataProvider"); - - let import_queue = - import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None); - - let transaction_pool = BasicPool::new_full( - config.transaction_pool.clone(), - true.into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let (network, system_rpc_tx, network_starter) = { - let params = BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - }; - build_network(params)? - }; - - // offchain workers - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - - // Proposer object for block authorship. - let env = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - config.prometheus_registry(), - None - ); - - // Channel for the rpc handler to communicate with the authorship task. - let (command_sink, commands_stream) = mpsc::channel(10); - - let rpc_sink = command_sink.clone(); - - let rpc_handlers = { - let params = SpawnTasksParams { - config, - client: client.clone(), - backend: backend.clone(), - task_manager: &mut task_manager, - keystore: keystore.sync_keystore(), - on_demand: None, - transaction_pool: transaction_pool.clone(), - rpc_extensions_builder: Box::new(move |_, _| { - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone())) - ); - io - }), - remote_blockchain: None, - network, - system_rpc_tx, - telemetry: None - }; - spawn_tasks(params)? - }; - - let cloned_client = client.clone(); - let create_inherent_data_providers = Box::new(move |_, _| { - let client = cloned_client.clone(); - async move { - let timestamp = SlotTimestampProvider::new(client.clone()).map_err(|err| format!("{:?}", err))?; - let babe = sp_consensus_babe::inherents::InherentDataProvider::new(timestamp.slot().into()); - Ok((timestamp, babe)) - } - }); - - // Background authorship future. 
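// `run_manual_seal` (whose parameter list is reformatted here) consumes
// `EngineCommand`s from the channel created above and seals one block per
// command, acknowledging the result back to the sender; `seal_blocks` in
// node.rs below drives it exactly this way. A toy, std-only model of that
// command/acknowledgement loop, with plain threads and `std::sync::mpsc`
// standing in for the async tasks and futures channels:
use std::sync::mpsc;
use std::thread;

enum ToyEngineCommand {
    // The real command also carries create_empty/finalize flags.
    SealNewBlock { reply: mpsc::Sender<u64> },
}

fn main() {
    let (sink, stream) = mpsc::channel();
    // Stand-in for the background authorship task.
    let authorship = thread::spawn(move || {
        let mut height = 0u64;
        for cmd in stream {
            let ToyEngineCommand::SealNewBlock { reply } = cmd;
            height += 1; // "seal" a block
            reply.send(height).unwrap();
        }
    });
    // Stand-in for the rpc/test side issuing seal requests.
    for expected in 1..=3u64 {
        let (reply, sealed) = mpsc::channel();
        sink.send(ToyEngineCommand::SealNewBlock { reply }).unwrap();
        assert_eq!(sealed.recv().unwrap(), expected);
    }
    drop(sink); // closing the sink ends the authorship loop
    authorship.join().unwrap();
}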
- let authorship_future = run_manual_seal(ManualSealParams { - block_import, - env, - client: client.clone(), - pool: transaction_pool.clone(), - commands_stream, - select_chain, - consensus_data_provider: Some(Box::new(consensus_data_provider)), - create_inherent_data_providers, - }); - - // spawn the authorship task as an essential task. - task_manager - .spawn_essential_handle() - .spawn("manual-seal", authorship_future); - - network_starter.start_network(); - let rpc_handler = rpc_handlers.io_handler(); - - Ok(( - rpc_handler, - task_manager, - client, - transaction_pool, - command_sink, - backend, - )) + use sp_consensus_babe::AuthorityId; + let config = match config_or_chain_spec { + ConfigOrChainSpec::Config(config) => config, + ConfigOrChainSpec::ChainSpec(chain_spec, task_executor) => + default_config(task_executor, chain_spec), + }; + + let (client, backend, keystore, mut task_manager) = + new_full_parts::(&config, None)?; + let client = Arc::new(client); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let (grandpa_block_import, ..) = grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + None, + )?; + + let slot_duration = sc_consensus_babe::Config::get_or_compute(&*client)?; + let (block_import, babe_link) = sc_consensus_babe::block_import( + slot_duration.clone(), + grandpa_block_import, + client.clone(), + )?; + + let consensus_data_provider = BabeConsensusDataProvider::new( + client.clone(), + keystore.sync_keystore(), + babe_link.epoch_changes().clone(), + vec![(AuthorityId::from(Alice.public()), 1000)], + ) + .expect("failed to create ConsensusDataProvider"); + + let import_queue = + import_queue(Box::new(block_import.clone()), &task_manager.spawn_essential_handle(), None); + + let transaction_pool = BasicPool::new_full( + config.transaction_pool.clone(), + true.into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let (network, system_rpc_tx, network_starter) = { + let params = BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + }; + build_network(params)? + }; + + // offchain workers + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + + // Proposer object for block authorship. + let env = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool.clone(), + config.prometheus_registry(), + None, + ); + + // Channel for the rpc handler to communicate with the authorship task. + let (command_sink, commands_stream) = mpsc::channel(10); + + let rpc_sink = command_sink.clone(); + + let rpc_handlers = { + let params = SpawnTasksParams { + config, + client: client.clone(), + backend: backend.clone(), + task_manager: &mut task_manager, + keystore: keystore.sync_keystore(), + on_demand: None, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder: Box::new(move |_, _| { + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone()))); + io + }), + remote_blockchain: None, + network, + system_rpc_tx, + telemetry: None, + }; + spawn_tasks(params)? 
+ }; + + let cloned_client = client.clone(); + let create_inherent_data_providers = Box::new(move |_, _| { + let client = cloned_client.clone(); + async move { + let timestamp = + SlotTimestampProvider::new(client.clone()).map_err(|err| format!("{:?}", err))?; + let babe = + sp_consensus_babe::inherents::InherentDataProvider::new(timestamp.slot().into()); + Ok((timestamp, babe)) + } + }); + + // Background authorship future. + let authorship_future = run_manual_seal(ManualSealParams { + block_import, + env, + client: client.clone(), + pool: transaction_pool.clone(), + commands_stream, + select_chain, + consensus_data_provider: Some(Box::new(consensus_data_provider)), + create_inherent_data_providers, + }); + + // spawn the authorship task as an essential task. + task_manager.spawn_essential_handle().spawn("manual-seal", authorship_future); + + network_starter.start_network(); + let rpc_handler = rpc_handlers.io_handler(); + + Ok((rpc_handler, task_manager, client, transaction_pool, command_sink, backend)) } diff --git a/substrate/test-utils/test-runner/src/host_functions.rs b/substrate/test-utils/test-runner/src/host_functions.rs index 534d4a23fdccb8319b527fc79a3c45a305f986d0..6bd91929256a399aae0b776fda82af83fbdfef6d 100644 --- a/substrate/test-utils/test-runner/src/host_functions.rs +++ b/substrate/test-utils/test-runner/src/host_functions.rs @@ -73,12 +73,16 @@ macro_rules! override_host_functions { pub struct SignatureVerificationOverride; impl sp_wasm_interface::HostFunctions for SignatureVerificationOverride { - fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> { - override_host_functions!( - "ext_crypto_ecdsa_verify_version_1", EcdsaVerify, - "ext_crypto_ed25519_verify_version_1", Ed25519Verify, - "ext_crypto_sr25519_verify_version_1", Sr25519Verify, - "ext_crypto_sr25519_verify_version_2", Sr25519VerifyV2, - ) - } + fn host_functions() -> Vec<&'static dyn sp_wasm_interface::Function> { + override_host_functions!( + "ext_crypto_ecdsa_verify_version_1", + EcdsaVerify, + "ext_crypto_ed25519_verify_version_1", + Ed25519Verify, + "ext_crypto_sr25519_verify_version_1", + Sr25519Verify, + "ext_crypto_sr25519_verify_version_2", + Sr25519VerifyV2, + ) + } } diff --git a/substrate/test-utils/test-runner/src/lib.rs b/substrate/test-utils/test-runner/src/lib.rs index 1976d132b7c50125922e98964fb63a034711cf9b..c73ead9eb59ab98f39ce37d97e3cd7748ed9e483 100644 --- a/substrate/test-utils/test-runner/src/lib.rs +++ b/substrate/test-utils/test-runner/src/lib.rs @@ -187,12 +187,12 @@ //! fn simple_balances_test() { //! // given //! let config = NodeConfig { -//! execution_strategies: ExecutionStrategies { -//! syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, -//! importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, -//! block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, -//! offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, -//! other: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! execution_strategies: ExecutionStrategies { +//! syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, +//! other: sc_client_api::ExecutionStrategy::NativeWhenPossible, //! }, //! chain_spec: Box::new(development_config()), //! 
log_targets: vec![], @@ -235,14 +235,14 @@ use sp_inherents::InherentDataProvider; use sp_runtime::traits::{Block as BlockT, SignedExtension}; mod client; +mod host_functions; mod node; mod utils; -mod host_functions; +pub use client::*; pub use host_functions::*; pub use node::*; pub use utils::*; -pub use client::*; /// Wrapper trait for concrete type required by this testing framework. pub trait ChainInfo: Sized { @@ -271,7 +271,10 @@ pub trait ChainInfo: Sized { + BlockImport< Self::Block, Error = sp_consensus::Error, - Transaction = TransactionFor, Self::Block>, + Transaction = TransactionFor< + TFullClient, + Self::Block, + >, > + 'static; /// The signed extras required by the runtime @@ -281,5 +284,7 @@ pub trait ChainInfo: Sized { type InherentDataProviders: InherentDataProvider + 'static; /// Signed extras, this function is called in an externalities-provided environment. - fn signed_extras(from: ::AccountId) -> Self::SignedExtras; + fn signed_extras( + from: ::AccountId, + ) -> Self::SignedExtras; } diff --git a/substrate/test-utils/test-runner/src/node.rs b/substrate/test-utils/test-runner/src/node.rs index b1e5854798eecef77f04adbc1baa60a0bc160249..83fc23681345d5ab0f0472e7da12247c98585df0 100644 --- a/substrate/test-utils/test-runner/src/node.rs +++ b/substrate/test-utils/test-runner/src/node.rs @@ -18,21 +18,28 @@ use std::sync::Arc; -use futures::{FutureExt, SinkExt, channel::{mpsc, oneshot}}; +use crate::ChainInfo; +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, SinkExt, +}; use jsonrpc_core::MetaIoHandler; use manual_seal::EngineCommand; -use sc_client_api::{backend::{self, Backend}, CallExecutor, ExecutorProvider}; +use sc_client_api::{ + backend::{self, Backend}, + CallExecutor, ExecutorProvider, +}; use sc_service::{TFullBackend, TFullCallExecutor, TFullClient, TaskManager}; +use sc_transaction_pool_api::TransactionPool; use sp_api::{OverlayedChanges, StorageTransactionCache}; use sp_blockchain::HeaderBackend; use sp_core::ExecutionContext; use sp_runtime::{ generic::{BlockId, UncheckedExtrinsic}, - traits::{Block as BlockT, Header, Extrinsic, NumberFor}, - transaction_validity::TransactionSource, MultiSignature, MultiAddress + traits::{Block as BlockT, Extrinsic, Header, NumberFor}, + transaction_validity::TransactionSource, + MultiAddress, MultiSignature, }; -use crate::ChainInfo; -use sc_transaction_pool_api::TransactionPool; use sp_state_machine::Ext; /// This holds a reference to a running node on another thread, @@ -46,44 +53,51 @@ pub struct Node { /// client instance client: Arc>, /// transaction pool - pool: Arc::Block, - Hash = <::Block as BlockT>::Hash, - Error = sc_transaction_pool::error::Error, - InPoolTransaction = sc_transaction_pool::Transaction< - <::Block as BlockT>::Hash, - <::Block as BlockT>::Extrinsic, + pool: Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, >, - >>, + >, /// channel to communicate with manual seal on. manual_seal_command_sink: mpsc::Sender::Hash>>, /// backend type. backend: Arc>, /// Block number at initialization of this Node. 
- initial_block_number: NumberFor + initial_block_number: NumberFor, } -type EventRecord = frame_system::EventRecord<::Event, ::Hash>; +type EventRecord = frame_system::EventRecord< + ::Event, + ::Hash, +>; impl Node - where - T: ChainInfo, - <::Header as Header>::Number: From, +where + T: ChainInfo, + <::Header as Header>::Number: From, { /// Creates a new node. pub fn new( rpc_handler: Arc>, task_manager: TaskManager, client: Arc>, - pool: Arc::Block, - Hash = <::Block as BlockT>::Hash, - Error = sc_transaction_pool::error::Error, - InPoolTransaction = sc_transaction_pool::Transaction< - <::Block as BlockT>::Hash, - <::Block as BlockT>::Extrinsic, + pool: Arc< + dyn TransactionPool< + Block = ::Block, + Hash = <::Block as BlockT>::Hash, + Error = sc_transaction_pool::error::Error, + InPoolTransaction = sc_transaction_pool::Transaction< + <::Block as BlockT>::Hash, + <::Block as BlockT>::Extrinsic, + >, >, - >>, + >, command_sink: mpsc::Sender::Hash>>, backend: Arc>, ) -> Self { @@ -102,10 +116,12 @@ impl Node /// eg /// ```ignore /// let request = r#"{"jsonrpc":"2.0","method":"engine_createBlock","params": [true, true],"id":1}"#; - /// let response = node.rpc_handler() + /// let response = node.rpc_handler() /// .handle_request_sync(request, Default::default()); /// ``` - pub fn rpc_handler(&self) -> Arc> { + pub fn rpc_handler( + &self, + ) -> Arc> { self.rpc_handler.clone() } @@ -117,13 +133,18 @@ impl Node /// Executes closure in an externalities provided environment. pub fn with_state(&self, closure: impl FnOnce() -> R) -> R where - as CallExecutor>::Error: std::fmt::Debug, + as CallExecutor>::Error: + std::fmt::Debug, { let id = BlockId::Hash(self.client.info().best_hash); let mut overlay = OverlayedChanges::default(); - let changes_trie = backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()).unwrap(); - let mut cache = - StorageTransactionCache:: as Backend>::State>::default(); + let changes_trie = + backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()) + .unwrap(); + let mut cache = StorageTransactionCache::< + T::Block, + as Backend>::State, + >::default(); let mut extensions = self .client .execution_extensions() @@ -176,7 +197,9 @@ impl Node .expect("UncheckedExtrinsic::new() always returns Some"); let at = self.client.info().best_hash; - self.pool.submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()).await + self.pool + .submit_one(&BlockId::Hash(at), TransactionSource::Local, ext.into()) + .await } /// Get the events of the most recently produced block @@ -186,7 +209,7 @@ impl Node /// Instructs manual seal to seal new, possibly empty blocks. 
pub async fn seal_blocks(&self, num: usize) { - let mut sink = self.manual_seal_command_sink.clone(); + let mut sink = self.manual_seal_command_sink.clone(); for count in 0..num { let (sender, future_block) = oneshot::channel(); @@ -201,8 +224,10 @@ impl Node future.await.expect(ERROR); match future_block.await.expect(ERROR) { - Ok(block) => log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num), - Err(err) => log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err), + Ok(block) => + log::info!("sealed {} (hash: {}) of {} blocks", count + 1, block.hash, num), + Err(err) => + log::error!("failed to seal block {} of {}, error: {:?}", count + 1, num, err), } } } diff --git a/substrate/test-utils/test-runner/src/utils.rs b/substrate/test-utils/test-runner/src/utils.rs index 9e722bcc510aa4c6e5222f39d16cdea0e7ac4f56..e0176fcb6cc2970f2746a97c0d03c07c1a2d85e0 100644 --- a/substrate/test-utils/test-runner/src/utils.rs +++ b/substrate/test-utils/test-runner/src/utils.rs @@ -16,18 +16,20 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use futures::FutureExt; +use sc_client_api::execution_extensions::ExecutionStrategies; +use sc_executor::WasmExecutionMethod; +use sc_informant::OutputFormat; +use sc_network::{ + config::{NetworkConfiguration, Role, TransportConfig}, + multiaddr, +}; use sc_service::{ - BasePath, ChainSpec, Configuration, TaskExecutor, - DatabaseConfig, KeepBlocks, TransactionStorageMode, TaskType, + config::KeystoreConfig, BasePath, ChainSpec, Configuration, DatabaseConfig, KeepBlocks, + TaskExecutor, TaskType, TransactionStorageMode, }; use sp_keyring::sr25519::Keyring::Alice; -use sc_network::{multiaddr, config::{NetworkConfiguration, TransportConfig, Role}}; -use sc_informant::OutputFormat; -use sc_service::config::KeystoreConfig; -use sc_executor::WasmExecutionMethod; -use sc_client_api::execution_extensions::ExecutionStrategies; use tokio::runtime::Handle; -use futures::FutureExt; pub use sc_cli::build_runtime; @@ -41,7 +43,10 @@ pub fn base_path() -> BasePath { } /// Produces a default configuration object, suitable for use with most setups. -pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box) -> Configuration { +pub fn default_config( + task_executor: TaskExecutor, + mut chain_spec: Box, +) -> Configuration { let base_path = base_path(); let root_path = base_path.path().to_path_buf().join("chains").join(chain_spec.id()); @@ -62,9 +67,7 @@ pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box TaskExecutor { let task_executor = move |fut, task_type| match task_type { TaskType::Async => handle.spawn(fut).map(drop), - TaskType::Blocking => handle.spawn_blocking(move || futures::executor::block_on(fut)).map(drop), + TaskType::Blocking => + handle.spawn_blocking(move || futures::executor::block_on(fut)).map(drop), }; task_executor.into() diff --git a/substrate/utils/browser/src/lib.rs b/substrate/utils/browser/src/lib.rs index 0d4937ceeee436bba17e43c2d9f88aa1290f20ec..0870ea84296c02f1d74ef81e8c4ca361b01b90c7 100644 --- a/substrate/utils/browser/src/lib.rs +++ b/substrate/utils/browser/src/lib.rs @@ -15,23 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License.
+use futures::{ + channel::{mpsc, oneshot}, + compat::*, + future::{ok, ready, select}, + prelude::*, +}; use futures01::sync::mpsc as mpsc01; +use libp2p_wasm_ext::{ffi, ExtTransport}; use log::{debug, info}; +use sc_chain_spec::Extension; use sc_network::config::TransportConfig; use sc_service::{ - RpcSession, Role, Configuration, TaskManager, RpcHandlers, config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, - GenericChainSpec, RuntimeGenesis, - KeepBlocks, TransactionStorageMode, + Configuration, GenericChainSpec, KeepBlocks, Role, RpcHandlers, RpcSession, RuntimeGenesis, + TaskManager, TransactionStorageMode, }; use sc_tracing::logging::LoggerBuilder; -use wasm_bindgen::prelude::*; -use futures::{ - prelude::*, channel::{oneshot, mpsc}, compat::*, future::{ready, ok, select} -}; use std::pin::Pin; -use sc_chain_spec::Extension; -use libp2p_wasm_ext::{ExtTransport, ffi}; +use wasm_bindgen::prelude::*; pub use console_error_panic_hook::set_once as set_console_error_panic_hook; @@ -73,7 +75,8 @@ where task_executor: (|fut, _| { wasm_bindgen_futures::spawn_local(fut); async {} - }).into(), + }) + .into(), telemetry_external_transport: Some(transport), role: Role::Light, database: { @@ -114,9 +117,7 @@ where max_runtime_instances: 8, announce_block: true, base_path: None, - informant_output_format: sc_informant::OutputFormat { - enable_color: false, - }, + informant_output_format: sc_informant::OutputFormat { enable_color: false }, disable_log_reloading: false, }; @@ -153,12 +154,11 @@ pub fn start_client(mut task_manager: TaskManager, rpc_handlers: RpcHandlers) -> Box::pin(async move { let _ = task_manager.future().await; }), - ).map(drop) + ) + .map(drop), ); - Client { - rpc_send_tx, - } + Client { rpc_send_tx } } #[wasm_bindgen] @@ -175,12 +175,8 @@ impl Client { }); wasm_bindgen_futures::future_to_promise(async { match rx.await { - Ok(fut) => { - fut.await - .map(|s| JsValue::from_str(&s)) - .ok_or_else(|| JsValue::NULL) - }, - Err(_) => Err(JsValue::NULL) + Ok(fut) => fut.await.map(|s| JsValue::from_str(&s)).ok_or_else(|| JsValue::NULL), + Err(_) => Err(JsValue::NULL), } }) } @@ -203,7 +199,8 @@ impl Client { }); wasm_bindgen_futures::spawn_local(async move { - let _ = rx.compat() + let _ = rx + .compat() .try_for_each(|s| { let _ = callback.call1(&callback, &JsValue::from_str(&s)); ok(()) diff --git a/substrate/utils/build-script-utils/src/git.rs b/substrate/utils/build-script-utils/src/git.rs index d01343634bc94eb389d1c3721d7161d29dfa748b..66a15737f84ca9d5848b66d3bbb31f64c81b8daf 100644 --- a/substrate/utils/build-script-utils/src/git.rs +++ b/substrate/utils/build-script-utils/src/git.rs @@ -33,16 +33,16 @@ pub fn rerun_if_git_head_changed() { Err(err) => { eprintln!("cargo:warning=Unable to read the Git repository: {}", err); - return; - } - Ok(None) => {} + return + }, + Ok(None) => {}, Ok(Some(paths)) => { for p in paths { println!("cargo:rerun-if-changed={}", p.display()); } - return; - } + return + }, } manifest_dir.pop(); diff --git a/substrate/utils/build-script-utils/src/lib.rs b/substrate/utils/build-script-utils/src/lib.rs index 8eb17a7de61fb37e58244ad62437f361255fc9fe..0c45c4b34ebe8d9cd00c4f2ab82cbf01e60296d8 100644 --- a/substrate/utils/build-script-utils/src/lib.rs +++ b/substrate/utils/build-script-utils/src/lib.rs @@ -17,8 +17,8 @@ //! Crate with utility functions for `build.rs` scripts. 
-mod version; mod git; +mod version; pub use git::*; pub use version::*; diff --git a/substrate/utils/build-script-utils/src/version.rs b/substrate/utils/build-script-utils/src/version.rs index f92c637c78cca2a7c2af5f4e3fd98090c533339b..52336eb0b6a2465ef8886429aa08264b48b87bcb 100644 --- a/substrate/utils/build-script-utils/src/version.rs +++ b/substrate/utils/build-script-utils/src/version.rs @@ -20,15 +20,13 @@ use std::{borrow::Cow, process::Command}; /// Generate the `cargo:` key output pub fn generate_cargo_keys() { - let output = Command::new("git") - .args(&["rev-parse", "--short", "HEAD"]) - .output(); + let output = Command::new("git").args(&["rev-parse", "--short", "HEAD"]).output(); let commit = match output { Ok(o) if o.status.success() => { let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); Cow::from(sha) - } + }, Ok(o) => { println!("cargo:warning=Git command failed with status: {}", o.status); Cow::from("unknown") diff --git a/substrate/utils/fork-tree/src/lib.rs b/substrate/utils/fork-tree/src/lib.rs index d1ec67d37b95477046693c2be8951a57cc7de9ba..f22d54d3d1a43c2f993ee72d3a415e46e361503f 100644 --- a/substrate/utils/fork-tree/src/lib.rs +++ b/substrate/utils/fork-tree/src/lib.rs @@ -20,9 +20,8 @@ #![warn(missing_docs)] -use std::cmp::Reverse; -use std::fmt; use codec::{Decode, Encode}; +use std::{cmp::Reverse, fmt}; /// Error that can occur when iterating over the tree. #[derive(Clone, Debug, PartialEq)] @@ -83,7 +82,8 @@ pub struct ForkTree { best_finalized_number: Option, } -impl ForkTree where +impl ForkTree +where H: PartialEq + Clone, N: Ord + Clone, V: Clone, @@ -102,17 +102,14 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { - let new_root_index = self.find_node_index_where( - hash, - number, - is_descendent_of, - predicate, - )?; + let new_root_index = + self.find_node_index_where(hash, number, is_descendent_of, predicate)?; let removed = if let Some(mut root_index) = new_root_index { let mut old_roots = std::mem::take(&mut self.roots); @@ -130,9 +127,10 @@ impl ForkTree where } } - let mut root = root - .expect("find_node_index_where will return array with at least one index; \ - this results in at least one item in removed; qed"); + let mut root = root.expect( + "find_node_index_where will return array with at least one index; \ + this results in at least one item in removed; qed", + ); let mut removed = old_roots; @@ -144,7 +142,7 @@ impl ForkTree where for child in root_children { if is_first && (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash)?) + child.number < *number && is_descendent_of(&child.hash, hash)?) { root.children.push(child); // assuming that the tree is well formed, only one child should pass this requirement @@ -168,16 +166,14 @@ impl ForkTree where } } -impl ForkTree where +impl ForkTree +where H: PartialEq, N: Ord, { /// Create a new empty tree. pub fn new() -> ForkTree { - ForkTree { - roots: Vec::new(), - best_finalized_number: None, - } + ForkTree { roots: Vec::new(), best_finalized_number: None } } /// Rebalance the tree, i.e.
sort child nodes by max branch depth @@ -209,18 +205,19 @@ impl ForkTree where mut data: V, is_descendent_of: &F, ) -> Result> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } for root in self.roots.iter_mut() { if root.hash == hash { - return Err(Error::Duplicate); + return Err(Error::Duplicate) } match root.import(hash, number, data, is_descendent_of)? { @@ -231,17 +228,12 @@ impl ForkTree where }, None => { self.rebalance(); - return Ok(false); + return Ok(false) }, } } - self.roots.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); + self.roots.push(Node { data, hash, number, children: Vec::new() }); self.rebalance(); @@ -249,18 +241,18 @@ impl ForkTree where } /// Iterates over the existing roots in the tree. - pub fn roots(&self) -> impl Iterator { + pub fn roots(&self) -> impl Iterator { self.roots.iter().map(|node| (&node.hash, &node.number, &node.data)) } - fn node_iter(&self) -> impl Iterator> { + fn node_iter(&self) -> impl Iterator> { // we need to reverse the order of roots to maintain the expected // ordering since the iterator uses a stack to track state. ForkTreeIterator { stack: self.roots.iter().rev().collect() } } /// Iterates the nodes in the tree in pre-order. - pub fn iter(&self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) } @@ -274,7 +266,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -285,7 +278,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(node) = node { - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -293,23 +286,13 @@ impl ForkTree where } /// Map fork tree into values of new types. - pub fn map( - self, - f: &mut F, - ) -> ForkTree where + pub fn map(self, f: &mut F) -> ForkTree + where F: FnMut(&H, &N, V) -> VT, { - let roots = self.roots - .into_iter() - .map(|root| { - root.map(f) - }) - .collect(); - - ForkTree { - roots, - best_finalized_number: self.best_finalized_number, - } + let roots = self.roots.into_iter().map(|root| root.map(f)).collect(); + + ForkTree { roots, best_finalized_number: self.best_finalized_number } } /// Same as [`find_node_where`](ForkTree::find_node_where), but returns mutable reference. @@ -319,7 +302,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -330,7 +314,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(node) = node { - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -344,7 +328,8 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> where + ) -> Result>, Error> + where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -356,7 +341,7 @@ impl ForkTree where // found the node, early exit if let FindOutcome::Found(mut node) = node { node.push(index); - return Ok(Some(node)); + return Ok(Some(node)) } } @@ -367,7 +352,9 @@ impl ForkTree where /// with the given hash exists. 
All other roots are pruned, and the children /// of the finalized node become the new roots. pub fn finalize_root(&mut self, hash: &H) -> Option { - self.roots.iter().position(|node| node.hash == *hash) + self.roots + .iter() + .position(|node| node.hash == *hash) .map(|position| self.finalize_root_at(position)) } @@ -376,7 +363,7 @@ impl ForkTree where let node = self.roots.swap_remove(position); self.roots = node.children; self.best_finalized_number = Some(node.number); - return node.data; + return node.data } /// Finalize a node in the tree. This method will make sure that the node @@ -390,24 +377,25 @@ impl ForkTree where number: N, is_descendent_of: &F, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // make sure we're not finalizing a descendent of any root for root in self.roots.iter() { if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } @@ -443,18 +431,19 @@ impl ForkTree where number: N, is_descendent_of: &F, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } // check if one of the current roots is being finalized if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); + return Ok(FinalizationResult::Changed(Some(root))) } // we need to: @@ -469,23 +458,21 @@ impl ForkTree where let is_finalized = root.hash == *hash; let is_descendant = !is_finalized && root.number > number && is_descendent_of(hash, &root.hash)?; - let is_ancestor = !is_finalized - && !is_descendant && root.number < number - && is_descendent_of(&root.hash, hash)?; + let is_ancestor = !is_finalized && + !is_descendant && root.number < number && + is_descendent_of(&root.hash, hash)?; (is_finalized, is_descendant, is_ancestor) }; // if we have met finalized root - open it and return if is_finalized { - return Ok(FinalizationResult::Changed(Some( - self.finalize_root_at(idx), - ))); + return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))) } // if node is descendant of finalized block - just leave it as is if is_descendant { idx += 1; - continue; + continue } // if node is ancestor of finalized block - remove it and continue with children @@ -493,7 +480,7 @@ impl ForkTree where let root = self.roots.swap_remove(idx); self.roots.extend(root.children); changed = true; - continue; + continue } // if node is neither ancestor, nor descendant of the finalized block - remove it @@ -526,13 +513,14 @@ impl ForkTree where is_descendent_of: &F, predicate: P, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return 
Err(Error::Revert); + return Err(Error::Revert) } } @@ -544,11 +532,11 @@ impl ForkTree where if node.hash == *hash || is_descendent_of(&node.hash, hash)? { for node in node.children.iter() { if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))) } } } @@ -570,13 +558,14 @@ impl ForkTree where is_descendent_of: &F, predicate: P, ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { if let Some(ref best_finalized_number) = self.best_finalized_number { if number <= *best_finalized_number { - return Err(Error::Revert); + return Err(Error::Revert) } } @@ -589,12 +578,12 @@ impl ForkTree where if root.hash == *hash || is_descendent_of(&root.hash, hash)? { for node in root.children.iter() { if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); + return Err(Error::UnfinalizedAncestor) } } position = Some(i); - break; + break } } } @@ -616,9 +605,9 @@ impl ForkTree where let roots = std::mem::take(&mut self.roots); for root in roots { - let retain = root.number > number && is_descendent_of(hash, &root.hash)? - || root.number == number && root.hash == *hash - || is_descendent_of(&root.hash, hash)?; + let retain = root.number > number && is_descendent_of(hash, &root.hash)? || + root.number == number && root.hash == *hash || + is_descendent_of(&root.hash, hash)?; if retain { self.roots.push(root); @@ -681,26 +670,14 @@ mod node_implementation { } /// Map node data into values of new types. - pub fn map( - self, - f: &mut F, - ) -> Node where + pub fn map(self, f: &mut F) -> Node + where F: FnMut(&H, &N, V) -> VT, { - let children = self.children - .into_iter() - .map(|node| { - node.map(f) - }) - .collect(); + let children = self.children.into_iter().map(|node| node.map(f)).collect(); let vt = f(&self.hash, &self.number, self.data); - Node { - hash: self.hash, - number: self.number, - data: vt, - children, - } + Node { hash: self.hash, number: self.number, data: vt, children } } pub fn import( @@ -710,14 +687,17 @@ mod node_implementation { mut data: V, is_descendent_of: &F, ) -> Result, Error> - where E: fmt::Debug, - F: Fn(&H, &H) -> Result, + where + E: fmt::Debug, + F: Fn(&H, &H) -> Result, { if self.hash == hash { - return Err(Error::Duplicate); + return Err(Error::Duplicate) }; - if number <= self.number { return Ok(Some((hash, number, data))); } + if number <= self.number { + return Ok(Some((hash, number, data))) + } for node in self.children.iter_mut() { match node.import(hash, number, data, is_descendent_of)? { @@ -731,12 +711,7 @@ mod node_implementation { } if is_descendent_of(&self.hash, &hash)? 
{ - self.children.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); + self.children.push(Node { data, hash, number, children: Vec::new() }); Ok(None) } else { @@ -760,13 +735,14 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { // stop searching this branch if *number < self.number { - return Ok(FindOutcome::Failure(false)); + return Ok(FindOutcome::Failure(false)) } let mut known_descendent_of = false; @@ -785,7 +761,7 @@ mod node_implementation { // then it cannot be a descendent of any others, // so we don't search them. known_descendent_of = true; - break; + break }, FindOutcome::Failure(false) => {}, } @@ -799,7 +775,7 @@ mod node_implementation { if is_descendent_of { // if the predicate passes we return the node if predicate(&self.data) { - return Ok(FindOutcome::Found(Vec::new())); + return Ok(FindOutcome::Found(Vec::new())) } } @@ -820,9 +796,10 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; @@ -852,9 +829,10 @@ mod node_implementation { is_descendent_of: &F, predicate: &P, ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; @@ -875,7 +853,7 @@ mod node_implementation { } // Workaround for: https://github.com/rust-lang/rust/issues/34537 -use node_implementation::{Node, FindOutcome}; +use node_implementation::{FindOutcome, Node}; struct ForkTreeIterator<'a, H, N, V> { stack: Vec<&'a Node>, @@ -917,7 +895,7 @@ impl Iterator for RemovedIterator { #[cfg(test)] mod test { - use super::{FinalizationResult, ForkTree, Error}; + use super::{Error, FinalizationResult, ForkTree}; #[derive(Debug, PartialEq)] struct TestError; @@ -930,10 +908,10 @@ mod test { impl std::error::Error for TestError {} - fn test_fork_tree<'a>() -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { + fn test_fork_tree<'a>( + ) -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { let mut tree = ForkTree::new(); - // // - B - C - D - E // / // / - G @@ -959,7 +937,8 @@ mod test { ("C", b) => Ok(b == "D" || b == "E"), ("D", b) => Ok(b == "E"), ("E", _) => Ok(false), - ("F", b) => Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), + ("F", b) => + Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), ("G", _) => Ok(false), ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "O"), ("I", _) => Ok(false), @@ -1001,40 +980,22 @@ mod test { tree.finalize_root(&"A"); - assert_eq!( - tree.best_finalized_number, - Some(1), - ); + assert_eq!(tree.best_finalized_number, Some(1),); - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Revert), - ); + assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Revert),); } #[test] fn import_doesnt_add_duplicates() { let (mut tree, is_descendent_of) = test_fork_tree(); - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Duplicate), - ); + 
assert_eq!(tree.import("A", 1, (), &is_descendent_of), Err(Error::Duplicate),); - assert_eq!( - tree.import("I", 4, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("I", 4, (), &is_descendent_of), Err(Error::Duplicate),); - assert_eq!( - tree.import("G", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("G", 3, (), &is_descendent_of), Err(Error::Duplicate),); - assert_eq!( - tree.import("K", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); + assert_eq!(tree.import("K", 3, (), &is_descendent_of), Err(Error::Duplicate),); } #[test] @@ -1096,10 +1057,7 @@ mod test { let original_roots = tree.roots.clone(); // finalizing a block prior to any in the node doesn't change the tree - assert_eq!( - tree.finalize(&"0", 0, &is_descendent_of), - Ok(FinalizationResult::Unchanged), - ); + assert_eq!(tree.finalize(&"0", 0, &is_descendent_of), Ok(FinalizationResult::Unchanged),); assert_eq!(tree.roots, original_roots); @@ -1115,21 +1073,12 @@ mod test { ); // finalizing anything lower than what we observed will fail - assert_eq!( - tree.best_finalized_number, - Some(1), - ); + assert_eq!(tree.best_finalized_number, Some(1),); - assert_eq!( - tree.finalize(&"Z", 1, &is_descendent_of), - Err(Error::Revert), - ); + assert_eq!(tree.finalize(&"Z", 1, &is_descendent_of), Err(Error::Revert),); // trying to finalize a node without finalizing its ancestors first will fail - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Err(Error::UnfinalizedAncestor), - ); + assert_eq!(tree.finalize(&"H", 3, &is_descendent_of), Err(Error::UnfinalizedAncestor),); // after finalizing "F" we can finalize "H" assert_eq!( @@ -1195,10 +1144,7 @@ mod test { vec![("L", 4), ("I", 4)], ); - assert_eq!( - tree.best_finalized_number, - Some(3), - ); + assert_eq!(tree.best_finalized_number, Some(3),); // finalizing N (which is not a part of the tree): // 1) removes roots that are not ancestors/descendants of N (I) @@ -1215,23 +1161,20 @@ mod test { vec![], ); - assert_eq!( - tree.best_finalized_number, - Some(6), - ); + assert_eq!(tree.best_finalized_number, Some(6),); } #[test] fn finalize_with_descendent_works() { #[derive(Debug, PartialEq)] - struct Change { effective: u64 } + struct Change { + effective: u64, + } let (mut tree, is_descendent_of) = { let mut tree = ForkTree::new(); let is_descendent_of = |base: &&str, block: &&str| -> Result { - - // // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) // \ // - (G #100) @@ -1270,24 +1213,15 @@ mod test { // finalizing "D" will finalize a block from the tree, but it can't be applied yet // since it is not a root change assert_eq!( - tree.finalizes_any_with_descendent_if( - &"D", - 10, - &is_descendent_of, - |c| c.effective == 10, - ), + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective == + 10,), Ok(Some(false)), ); // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, // although it will clear out "A1" from the tree assert_eq!( - tree.finalize_with_descendent_if( - &"B", - 2, - &is_descendent_of, - |c| c.effective <= 2, - ), + tree.finalize_with_descendent_if(&"B", 2, &is_descendent_of, |c| c.effective <= 2,), Ok(FinalizationResult::Changed(None)), ); @@ -1308,12 +1242,7 @@ mod test { ); assert_eq!( - tree.finalize_with_descendent_if( - &"C", - 5, - &is_descendent_of, - |c| c.effective <= 5, - ), + tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of, |c| c.effective <= 5,), Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), ); @@ 
-1324,33 +1253,20 @@ mod test { // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first assert_eq!( - tree.finalizes_any_with_descendent_if( - &"F", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective <= + 100,), Err(Error::UnfinalizedAncestor), ); // it will work with "G" though since it is not in the same branch as "E" assert_eq!( - tree.finalizes_any_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= + 100,), Ok(Some(true)), ); assert_eq!( - tree.finalize_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), + tree.finalize_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= 100,), Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), ); @@ -1365,12 +1281,19 @@ mod test { tree.iter().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), vec![ ("A", 1), - ("B", 2), ("C", 3), ("D", 4), ("E", 5), - ("F", 2), ("H", 3), ("L", 4), ("M", 5), + ("B", 2), + ("C", 3), + ("D", 4), + ("E", 5), + ("F", 2), + ("H", 3), + ("L", 4), + ("M", 5), ("O", 5), ("I", 4), ("G", 3), - ("J", 2), ("K", 3), + ("J", 2), + ("K", 3), ], ); } @@ -1400,19 +1323,11 @@ mod test { // "L" is a descendent of "K", but the predicate will only pass for "K", // therefore only one call to `is_descendent_of` should be made assert_eq!( - tree.finalizes_any_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), + tree.finalizes_any_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), Ok(Some(false)), ); - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); } n_is_descendent_of_calls.store(0, Ordering::SeqCst); @@ -1431,19 +1346,11 @@ mod test { // "L" is a descendent of "K", but the predicate will only pass for "K", // therefore only one call to `is_descendent_of` should be made assert_eq!( - tree.finalize_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), + tree.finalize_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), Ok(FinalizationResult::Changed(Some(10))), ); - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); } } @@ -1451,12 +1358,7 @@ mod test { fn find_node_works() { let (tree, is_descendent_of) = test_fork_tree(); - let node = tree.find_node_where( - &"D", - &4, - &is_descendent_of, - &|_| true, - ).unwrap().unwrap(); + let node = tree.find_node_where(&"D", &4, &is_descendent_of, &|_| true).unwrap().unwrap(); assert_eq!(node.hash, "C"); assert_eq!(node.number, 3); @@ -1473,17 +1375,9 @@ mod test { fn prune_works() { let (mut tree, is_descendent_of) = test_fork_tree(); - let removed = tree.prune( - &"C", - &3, - &is_descendent_of, - &|_| true, - ).unwrap(); + let removed = tree.prune(&"C", &3, &is_descendent_of, &|_| true).unwrap(); - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["B"], - ); + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::>(), vec!["B"],); assert_eq!( tree.iter().map(|(hash, _, _)| *hash).collect::>(), @@ -1495,34 +1389,19 @@ mod test { vec!["A", "F", "H", "L", "M", "O", "I", "G", "J", "K"] ); - let removed = tree.prune( - &"E", - &5, - &is_descendent_of, - &|_| true, - ).unwrap(); + let removed = tree.prune(&"E", 
&5, &is_descendent_of, &|_| true).unwrap(); - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["D"], - ); + assert_eq!(tree.roots.iter().map(|node| node.hash).collect::>(), vec!["D"],); - assert_eq!( - tree.iter().map(|(hash, _, _)| *hash).collect::>(), - vec!["D", "E"], - ); + assert_eq!(tree.iter().map(|(hash, _, _)| *hash).collect::>(), vec!["D", "E"],); - assert_eq!( - removed.map(|(hash, _, _)| hash).collect::>(), - vec!["B", "C"] - ); + assert_eq!(removed.map(|(hash, _, _)| hash).collect::>(), vec!["B", "C"]); } #[test] fn find_node_backtracks_after_finding_highest_descending_node() { let mut tree = ForkTree::new(); - // // A - B // \ // — C @@ -1543,12 +1422,7 @@ mod test { // when searching the tree we reach node `C`, but the // predicate doesn't pass. we should backtrack to `B`, but not to `A`, // since "B" fulfills the predicate. - let node = tree.find_node_where( - &"D", - &3, - &is_descendent_of, - &|data| *data < 3, - ).unwrap(); + let node = tree.find_node_where(&"D", &3, &is_descendent_of, &|data| *data < 3).unwrap(); assert_eq!(node.unwrap().hash, "B"); } diff --git a/substrate/utils/frame/benchmarking-cli/src/command.rs b/substrate/utils/frame/benchmarking-cli/src/command.rs index 3bfb639dd9eb7f4f4dfe51f1a13d6710780df15d..2ef9f3914a5d7153d6b31d7c3613376e34ab4c5a 100644 --- a/substrate/utils/frame/benchmarking-cli/src/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/command.rs @@ -19,7 +19,7 @@ use crate::BenchmarkCmd; use codec::{Decode, Encode}; use frame_benchmarking::{Analysis, BenchmarkBatch, BenchmarkSelector}; use frame_support::traits::StorageInfo; -use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; +use sc_cli::{CliConfiguration, ExecutionStrategy, Result, SharedParams}; use sc_client_db::BenchmarkingState; use sc_executor::NativeExecutor; use sc_service::{Configuration, NativeExecutionDispatch}; @@ -49,11 +49,15 @@ impl BenchmarkCmd { } if let Some(header_file) = &self.header { - if !header_file.is_file() { return Err("Header file is invalid!".into()) }; + if !header_file.is_file() { + return Err("Header file is invalid!".into()) + }; } if let Some(handlebars_template_file) = &self.template { - if !handlebars_template_file.is_file() { return Err("Handlebars template file is invalid!".into()) }; + if !handlebars_template_file.is_file() { + return Err("Handlebars template file is invalid!".into()) + }; } let spec = config.chain_spec; @@ -93,7 +97,8 @@ impl BenchmarkCmd { self.repeat, !self.no_verify, self.extra, - ).encode(), + ) + .encode(), extensions, &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, sp_core::testing::TaskExecutor::new(), @@ -126,20 +131,25 @@ impl BenchmarkCmd { ); // Skip raw data + analysis if there are no results - if batch.results.is_empty() { continue } + if batch.results.is_empty() { + continue + } if self.raw_data { // Print the table header - batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0)); + batch.results[0] + .components + .iter() + .for_each(|param| print!("{:?},", param.0)); print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n"); // Print the values batch.results.iter().for_each(|result| { - let parameters = &result.components; parameters.iter().for_each(|param| print!("{:?},", param.1)); // Print extrinsic time and storage root time - print!("{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", + print!( + "{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", result.extrinsic_time, 
result.storage_root_time, result.reads, @@ -156,25 +166,39 @@ impl BenchmarkCmd { // Conduct analysis. if !self.no_median_slopes { println!("Median Slopes Analysis\n========"); - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::ExtrinsicTime) { + if let Some(analysis) = Analysis::median_slopes( + &batch.results, + BenchmarkSelector::ExtrinsicTime, + ) { println!("-- Extrinsic Time --\n{}", analysis); } - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) { + if let Some(analysis) = + Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) + { println!("Reads = {:?}", analysis); } - if let Some(analysis) = Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) { + if let Some(analysis) = + Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) + { println!("Writes = {:?}", analysis); } } if !self.no_min_squares { println!("Min Squares Analysis\n========"); - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime) { + if let Some(analysis) = Analysis::min_squares_iqr( + &batch.results, + BenchmarkSelector::ExtrinsicTime, + ) { println!("-- Extrinsic Time --\n{}", analysis); } - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) { + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) + { println!("Reads = {:?}", analysis); } - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) { + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) + { println!("Writes = {:?}", analysis); } } diff --git a/substrate/utils/frame/benchmarking-cli/src/writer.rs b/substrate/utils/frame/benchmarking-cli/src/writer.rs index 64a4ea62f0d4c74e79e03114359511bb30a64fa0..16c93081ac6e1979c1882e0096e1c21e41aea04d 100644 --- a/substrate/utils/frame/benchmarking-cli/src/writer.rs +++ b/substrate/utils/frame/benchmarking-cli/src/writer.rs @@ -17,21 +17,23 @@ // Outputs benchmark results to Rust files that can be ingested by the runtime. -use std::collections::{HashMap, HashSet}; -use std::fs; -use std::path::PathBuf; use core::convert::TryInto; +use std::{ + collections::{HashMap, HashSet}, + fs, + path::PathBuf, +}; -use serde::Serialize; use inflector::Inflector; +use serde::Serialize; use crate::BenchmarkCmd; use frame_benchmarking::{ - BenchmarkBatch, BenchmarkSelector, Analysis, AnalysisChoice, RegressionModel, BenchmarkResults, + Analysis, AnalysisChoice, BenchmarkBatch, BenchmarkResults, BenchmarkSelector, RegressionModel, }; +use frame_support::traits::StorageInfo; use sp_core::hexdisplay::HexDisplay; use sp_runtime::traits::Zero; -use frame_support::traits::StorageInfo; const VERSION: &'static str = env!("CARGO_PKG_VERSION"); const TEMPLATE: &str = include_str!("./template.hbs"); @@ -117,7 +119,9 @@ fn map_results( analysis_choice: &AnalysisChoice, ) -> Result>, std::io::Error> { // Skip if batches is empty. 
- if batches.is_empty() { return Err(io_error("empty batches")) } + if batches.is_empty() { + return Err(io_error("empty batches")) + } let mut all_benchmarks = HashMap::new(); let mut pallet_benchmarks = Vec::new(); @@ -125,7 +129,9 @@ fn map_results( let mut batches_iter = batches.iter().peekable(); while let Some(batch) = batches_iter.next() { // Skip if there are no results - if batch.results.is_empty() { continue } + if batch.results.is_empty() { + continue + } let pallet_string = String::from_utf8(batch.pallet.clone()).unwrap(); let instance_string = String::from_utf8(batch.instance.clone()).unwrap(); @@ -150,13 +156,11 @@ fn map_results( } // Get an iterator of errors from a model. If the model is `None` all errors are zero. -fn extract_errors(model: &Option) -> impl Iterator + '_ { +fn extract_errors(model: &Option) -> impl Iterator + '_ { let mut errors = model.as_ref().map(|m| m.se.regressor_values.iter()); - std::iter::from_fn(move || { - match &mut errors { - Some(model) => model.next().map(|val| *val as u128), - _ => Some(0), - } + std::iter::from_fn(move || match &mut errors { + Some(model) => model.next().map(|val| *val as u128), + _ => Some(0), }) } @@ -189,12 +193,16 @@ fn get_benchmark_data( let mut used_reads = Vec::new(); let mut used_writes = Vec::new(); - extrinsic_time.slopes.into_iter() + extrinsic_time + .slopes + .into_iter() .zip(extrinsic_time.names.iter()) .zip(extract_errors(&extrinsic_time.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } + if !used_components.contains(&name) { + used_components.push(name); + } used_extrinsic_time.push(ComponentSlope { name: name.clone(), slope: slope.saturating_mul(1000), @@ -202,35 +210,36 @@ fn get_benchmark_data( }); } }); - reads.slopes.into_iter() + reads + .slopes + .into_iter() .zip(reads.names.iter()) .zip(extract_errors(&reads.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_reads.push(ComponentSlope { - name: name.clone(), - slope, - error, - }); + if !used_components.contains(&name) { + used_components.push(name); + } + used_reads.push(ComponentSlope { name: name.clone(), slope, error }); } }); - writes.slopes.into_iter() + writes + .slopes + .into_iter() .zip(writes.names.iter()) .zip(extract_errors(&writes.model)) .for_each(|((slope, name), error)| { if !slope.is_zero() { - if !used_components.contains(&name) { used_components.push(name); } - used_writes.push(ComponentSlope { - name: name.clone(), - slope, - error, - }); + if !used_components.contains(&name) { + used_components.push(name); + } + used_writes.push(ComponentSlope { name: name.clone(), slope, error }); } }); // This puts a marker on any component which is entirely unused in the weight formula. - let components = batch.results[0].components + let components = batch.results[0] + .components .iter() .map(|(name, _)| -> Component { let name_string = name.to_string(); @@ -264,12 +273,8 @@ pub fn write_results( ) -> Result<(), std::io::Error> { // Use custom template if provided. let template: String = match &cmd.template { - Some(template_file) => { - fs::read_to_string(template_file)? 
- }, - None => { - TEMPLATE.to_string() - }, + Some(template_file) => fs::read_to_string(template_file)?, + None => TEMPLATE.to_string(), }; // Use header if provided @@ -288,9 +293,8 @@ pub fn write_results( let args = std::env::args().collect::>(); // Which analysis function should be used when outputting benchmarks - let analysis_choice: AnalysisChoice = cmd.output_analysis.clone() - .try_into() - .map_err(|e| io_error(e))?; + let analysis_choice: AnalysisChoice = + cmd.output_analysis.clone().try_into().map_err(|e| io_error(e))?; // Capture individual args let cmd_data = CmdData { @@ -341,7 +345,8 @@ pub fn write_results( }; let mut output_file = fs::File::create(file_path)?; - handlebars.render_template_to_write(&template, &hbs_data, &mut output_file) + handlebars + .render_template_to_write(&template, &hbs_data, &mut output_file) .map_err(|e| io_error(&e.to_string()))?; } Ok(()) @@ -355,7 +360,9 @@ fn add_storage_comments( results: &[BenchmarkResults], storage_info: &[StorageInfo], ) { - let storage_info_map = storage_info.iter().map(|info| (info.prefix.clone(), info)) + let storage_info_map = storage_info + .iter() + .map(|info| (info.prefix.clone(), info)) .collect::>(); // This tracks the keys we already identified, so we only generate a single comment. let mut identified = HashSet::>::new(); @@ -363,12 +370,14 @@ fn add_storage_comments( for result in results.clone() { for (key, reads, writes, whitelisted) in &result.keys { // skip keys which are whitelisted - if *whitelisted { continue; } + if *whitelisted { + continue + } let prefix_length = key.len().min(32); let prefix = key[0..prefix_length].to_vec(); if identified.contains(&prefix) { // skip adding comments for keys we already identified - continue; + continue } else { // track newly identified keys identified.insert(prefix.clone()); @@ -377,8 +386,10 @@ fn add_storage_comments( Some(key_info) => { let comment = format!( "Storage: {} {} (r:{} w:{})", - String::from_utf8(key_info.pallet_name.clone()).expect("encoded from string"), - String::from_utf8(key_info.storage_name.clone()).expect("encoded from string"), + String::from_utf8(key_info.pallet_name.clone()) + .expect("encoded from string"), + String::from_utf8(key_info.storage_name.clone()) + .expect("encoded from string"), reads, writes, ); @@ -392,7 +403,7 @@ fn add_storage_comments( writes, ); comments.push(comment) - } + }, } } } @@ -400,7 +411,8 @@ fn add_storage_comments( // Add an underscore after every 3rd character, i.e. a separator for large numbers. 
fn underscore(i: Number) -> String - where Number: std::string::ToString +where + Number: std::string::ToString, { let mut s = String::new(); let i_str = i.to_string(); @@ -420,11 +432,12 @@ fn underscore(i: Number) -> String struct UnderscoreHelper; impl handlebars::HelperDef for UnderscoreHelper { fn call<'reg: 'rc, 'rc>( - &self, h: &handlebars::Helper, + &self, + h: &handlebars::Helper, _: &handlebars::Handlebars, _: &handlebars::Context, _rc: &mut handlebars::RenderContext, - out: &mut dyn handlebars::Output + out: &mut dyn handlebars::Output, ) -> handlebars::HelperResult { use handlebars::JsonRender; let param = h.param(0).unwrap(); @@ -439,17 +452,20 @@ impl handlebars::HelperDef for UnderscoreHelper { struct JoinHelper; impl handlebars::HelperDef for JoinHelper { fn call<'reg: 'rc, 'rc>( - &self, h: &handlebars::Helper, + &self, + h: &handlebars::Helper, _: &handlebars::Handlebars, _: &handlebars::Context, _rc: &mut handlebars::RenderContext, - out: &mut dyn handlebars::Output + out: &mut dyn handlebars::Output, ) -> handlebars::HelperResult { use handlebars::JsonRender; let param = h.param(0).unwrap(); let value = param.value(); let joined = if value.is_array() { - value.as_array().unwrap() + value + .as_array() + .unwrap() .iter() .map(|v| v.render()) .collect::>() @@ -465,9 +481,9 @@ impl handlebars::HelperDef for JoinHelper { // u128 does not serialize well into JSON for `handlebars`, so we represent it as a string. fn string_serialize(x: &u128, s: S) -> Result where - S: serde::Serializer, + S: serde::Serializer, { - s.serialize_str(&x.to_string()) + s.serialize_str(&x.to_string()) } #[cfg(test)] @@ -475,22 +491,26 @@ mod test { use super::*; use frame_benchmarking::{BenchmarkBatch, BenchmarkParameter, BenchmarkResults}; - fn test_data(pallet: &[u8], benchmark: &[u8], param: BenchmarkParameter, base: u32, slope: u32) -> BenchmarkBatch { + fn test_data( + pallet: &[u8], + benchmark: &[u8], + param: BenchmarkParameter, + base: u32, + slope: u32, + ) -> BenchmarkBatch { let mut results = Vec::new(); - for i in 0 .. 
5 { - results.push( - BenchmarkResults { - components: vec![(param, i), (BenchmarkParameter::z, 0)], - extrinsic_time: (base + slope * i).into(), - storage_root_time: (base + slope * i).into(), - reads: (base + slope * i).into(), - repeat_reads: 0, - writes: (base + slope * i).into(), - repeat_writes: 0, - proof_size: 0, - keys: vec![], - } - ) + for i in 0..5 { + results.push(BenchmarkResults { + components: vec![(param, i), (BenchmarkParameter::z, 0)], + extrinsic_time: (base + slope * i).into(), + storage_root_time: (base + slope * i).into(), + reads: (base + slope * i).into(), + repeat_reads: 0, + writes: (base + slope * i).into(), + repeat_writes: 0, + proof_size: 0, + keys: vec![], + }) } return BenchmarkBatch { @@ -506,37 +526,25 @@ mod test { benchmark.components, vec![ Component { name: component.to_string(), is_used: true }, - Component { name: "z".to_string(), is_used: false}, + Component { name: "z".to_string(), is_used: false }, ], ); // Weights multiplied by 1,000 assert_eq!(benchmark.base_weight, base * 1_000); assert_eq!( benchmark.component_weight, - vec![ComponentSlope { - name: component.to_string(), - slope: slope * 1_000, - error: 0, - }] + vec![ComponentSlope { name: component.to_string(), slope: slope * 1_000, error: 0 }] ); // DB Reads/Writes are untouched assert_eq!(benchmark.base_reads, base); assert_eq!( benchmark.component_reads, - vec![ComponentSlope { - name: component.to_string(), - slope, - error: 0, - }] + vec![ComponentSlope { name: component.to_string(), slope, error: 0 }] ); assert_eq!(benchmark.base_writes, base); assert_eq!( benchmark.component_writes, - vec![ComponentSlope { - name: component.to_string(), - slope, - error: 0, - }] + vec![ComponentSlope { name: component.to_string(), slope, error: 0 }] ); } @@ -550,23 +558,24 @@ mod test { ], &[], &AnalysisChoice::default(), - ).unwrap(); + ) + .unwrap(); - let first_benchmark = &mapped_results.get( - &("first_pallet".to_string(), "instance".to_string()) - ).unwrap()[0]; + let first_benchmark = &mapped_results + .get(&("first_pallet".to_string(), "instance".to_string())) + .unwrap()[0]; assert_eq!(first_benchmark.name, "first_benchmark"); check_data(first_benchmark, "a", 10, 3); - let second_benchmark = &mapped_results.get( - &("first_pallet".to_string(), "instance".to_string()) - ).unwrap()[1]; + let second_benchmark = &mapped_results + .get(&("first_pallet".to_string(), "instance".to_string())) + .unwrap()[1]; assert_eq!(second_benchmark.name, "second_benchmark"); check_data(second_benchmark, "b", 9, 2); - let second_pallet_benchmark = &mapped_results.get( - &("second_pallet".to_string(), "instance".to_string()) - ).unwrap()[0]; + let second_pallet_benchmark = &mapped_results + .get(&("second_pallet".to_string(), "instance".to_string())) + .unwrap()[0]; assert_eq!(second_pallet_benchmark.name, "first_benchmark"); check_data(second_pallet_benchmark, "c", 3, 4); } diff --git a/substrate/utils/frame/frame-utilities-cli/src/lib.rs b/substrate/utils/frame/frame-utilities-cli/src/lib.rs index 83f3e9ea00d453e596d69e58aa8e34859ba73d4f..4f5b1da5766a31f0b74cea773e674f4acf5853f2 100644 --- a/substrate/utils/frame/frame-utilities-cli/src/lib.rs +++ b/substrate/utils/frame/frame-utilities-cli/src/lib.rs @@ -20,4 +20,3 @@ mod pallet_id; pub use pallet_id::PalletIdCmd; - diff --git a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs b/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs index 09304979cb09ff156ab087f511848ed58ef8640a..2caac7db588a96d002ecb843a07c7f7fbb6d3221 100644 --- 
a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -17,22 +17,19 @@ //! Implementation of the `palletid` subcommand +use frame_support::PalletId; use sc_cli::{ - Error, utils::print_from_uri, CryptoSchemeFlag, - OutputTypeFlag, KeystoreParams, with_crypto_scheme, + utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, + OutputTypeFlag, }; +use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::AccountIdConversion; -use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; -use std::convert::{TryInto, TryFrom}; +use std::convert::{TryFrom, TryInto}; use structopt::StructOpt; -use frame_support::PalletId; /// The `palletid` command #[derive(Debug, StructOpt)] -#[structopt( - name = "palletid", - about = "Inspect a module ID address" -)] +#[structopt(name = "palletid", about = "Inspect a module ID address")] pub struct PalletIdCmd { /// The module ID used to derive the account id: String, @@ -63,18 +60,18 @@ pub struct PalletIdCmd { impl PalletIdCmd { /// runs the command pub fn run(&self) -> Result<(), Error> - where - R: frame_system::Config, - R::AccountId: Ss58Codec, + where + R: frame_system::Config, + R::AccountId: Ss58Codec, { if self.id.len() != 8 { Err("a module id must be a string of 8 characters")? } let password = self.keystore_params.read_password()?; - let id_fixed_array: [u8; 8] = self.id.as_bytes() - .try_into() - .map_err(|_| "Cannot convert argument to palletid: argument should be 8-character string")?; + let id_fixed_array: [u8; 8] = self.id.as_bytes().try_into().map_err(|_| { + "Cannot convert argument to palletid: argument should be 8-character string" + })?; let account_id: R::AccountId = PalletId(id_fixed_array).into_account(); @@ -91,4 +88,3 @@ impl PalletIdCmd { Ok(()) } } - diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 4c1aeccf5041c54c45dbf6086bd22f0ffce90d6f..0ad6ae578b06cccb32614764544e47ac0bcd27c0 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -20,21 +20,19 @@ //! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate //! based chain, or a local state snapshot file. -use std::{ - fs, - path::{Path, PathBuf}, -}; +use codec::{Decode, Encode}; +use jsonrpsee_ws_client::{types::v2::params::JsonRpcParams, WsClient, WsClientBuilder}; use log::*; -use sp_core::hashing::twox_128; -pub use sp_io::TestExternalities; use sp_core::{ + hashing::twox_128, hexdisplay::HexDisplay, - storage::{StorageKey, StorageData}, + storage::{StorageData, StorageKey}, }; -use codec::{Encode, Decode}; +pub use sp_io::TestExternalities; use sp_runtime::traits::Block as BlockT; -use jsonrpsee_ws_client::{ - WsClientBuilder, WsClient, types::v2::params::JsonRpcParams, +use std::{ + fs, + path::{Path, PathBuf}, }; pub mod rpc_api; @@ -122,7 +120,10 @@ pub struct OnlineConfig { impl OnlineConfig { /// Return rpc (ws) client. fn rpc_client(&self) -> &WsClient { - self.transport.client.as_ref().expect("ws client must have been initialized by now; qed.") + self.transport + .client + .as_ref() + .expect("ws client must have been initialized by now; qed.") } } @@ -137,7 +138,6 @@ impl Default for OnlineConfig { } } - /// Configuration of the state snapshot. 
#[derive(Clone)] pub struct SnapshotConfig { @@ -208,10 +208,12 @@ impl Builder { maybe_at: Option, ) -> Result { trace!(target: LOG_TARGET, "rpc: get_storage"); - RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at).await.map_err(|e| { - error!("Error = {:?}", e); - "rpc get_storage failed." - }) + RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at) + .await + .map_err(|e| { + error!("Error = {:?}", e); + "rpc get_storage failed." + }) } /// Get the latest finalized head. async fn rpc_get_head(&self) -> Result { @@ -249,7 +251,7 @@ impl Builder { if page_len < PAGE as usize { debug!(target: LOG_TARGET, "last page received: {}", page_len); - break all_keys; + break all_keys } else { let new_last_key = all_keys.last().expect("all_keys is populated; has .last(); qed"); @@ -290,21 +292,22 @@ impl Builder { .map(|key| { ( "state_getStorage", - JsonRpcParams::Array( - vec![ - to_value(key).expect("json serialization will work; qed."), - to_value(at).expect("json serialization will work; qed."), - ] - ), + JsonRpcParams::Array(vec![ + to_value(key).expect("json serialization will work; qed."), + to_value(at).expect("json serialization will work; qed."), + ]), ) }) .collect::>(); - let values = client.batch_request::>(batch) - .await - .map_err(|e| { - log::error!(target: LOG_TARGET, "failed to execute batch: {:?}. Error: {:?}", chunk_keys, e); - "batch failed." - })?; + let values = client.batch_request::>(batch).await.map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to execute batch: {:?}. Error: {:?}", + chunk_keys, + e + ); + "batch failed." + })?; assert_eq!(chunk_keys.len(), values.len()); for (idx, key) in chunk_keys.into_iter().enumerate() { let maybe_value = values[idx].clone(); @@ -428,7 +431,7 @@ impl Builder { self.save_state_snapshot(&kp, &c.path)?; } kp - } + }, }; info!( @@ -497,7 +500,7 @@ impl Builder { #[cfg(test)] mod test_prelude { pub(crate) use super::*; - pub(crate) use sp_runtime::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; + pub(crate) use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; pub(crate) type Block = RawBlock>; @@ -551,7 +554,11 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - modules: vec!["Proxy".to_owned(), "Multisig".to_owned(), "PhragmenElection".to_owned()], + modules: vec![ + "Proxy".to_owned(), + "Multisig".to_owned(), + "PhragmenElection".to_owned(), + ], ..Default::default() })) .build() diff --git a/substrate/utils/frame/remote-externalities/src/rpc_api.rs b/substrate/utils/frame/remote-externalities/src/rpc_api.rs index 59d6bba8dd8679579e31e8be55a92da91524c625..be77cd9499191b0c1bbb9e4a5b5c5ac0078eb4f7 100644 --- a/substrate/utils/frame/remote-externalities/src/rpc_api.rs +++ b/substrate/utils/frame/remote-externalities/src/rpc_api.rs @@ -18,14 +18,13 @@ //! WS RPC API for one off RPC calls to a substrate node. 
// TODO: Consolidate one off RPC calls https://github.com/paritytech/substrate/issues/8988 -use sp_runtime::{generic::SignedBlock, traits::{Block as BlockT, Header as HeaderT}}; use jsonrpsee_ws_client::{ - WsClientBuilder, - WsClient, - types::{ - v2::params::JsonRpcParams, - traits::Client - }, + types::{traits::Client, v2::params::JsonRpcParams}, + WsClient, WsClientBuilder, +}; +use sp_runtime::{ + generic::SignedBlock, + traits::{Block as BlockT, Header as HeaderT}, }; /// Get the header of the block identified by `at` @@ -38,7 +37,8 @@ where let params = vec![hash_to_json::(at)?]; let client = build_client(from).await?; - client.request::("chain_getHeader", JsonRpcParams::Array(params)) + client + .request::("chain_getHeader", JsonRpcParams::Array(params)) .await .map_err(|e| format!("chain_getHeader request failed: {:?}", e)) } @@ -51,7 +51,8 @@ where { let client = build_client(from).await?; - client.request::("chain_getFinalizedHead", JsonRpcParams::NoParams) + client + .request::("chain_getFinalizedHead", JsonRpcParams::NoParams) .await .map_err(|e| format!("chain_getFinalizedHead request failed: {:?}", e)) } @@ -81,7 +82,7 @@ fn hash_to_json(hash: Block::Hash) -> Result>(from: S) -> Result { - WsClientBuilder::default() + WsClientBuilder::default() .max_request_body_size(u32::MAX) .build(from.as_ref()) .await diff --git a/substrate/utils/frame/rpc/support/src/lib.rs b/substrate/utils/frame/rpc/support/src/lib.rs index 417f2bfc22ac8cffd4a7f968ec28f5f828722448..37d85f41825d3da4d2b9b077011bbb420d4d9ee3 100644 --- a/substrate/utils/frame/rpc/support/src/lib.rs +++ b/substrate/utils/frame/rpc/support/src/lib.rs @@ -20,16 +20,14 @@ #![warn(missing_docs)] +use codec::{DecodeAll, FullCodec, FullEncode}; use core::marker::PhantomData; +use frame_support::storage::generator::{StorageDoubleMap, StorageMap, StorageValue}; use futures::compat::Future01CompatExt; use jsonrpc_client_transports::RpcError; -use codec::{DecodeAll, FullCodec, FullEncode}; +use sc_rpc_api::state::StateClient; use serde::{de::DeserializeOwned, Serialize}; -use frame_support::storage::generator::{ - StorageDoubleMap, StorageMap, StorageValue -}; use sp_storage::{StorageData, StorageKey}; -use sc_rpc_api::state::StateClient; /// A typed query on chain state usable from an RPC client. /// @@ -54,7 +52,7 @@ use sc_rpc_api::state::StateClient; /// # struct TestRuntime; /// # /// # decl_module! { -/// # pub struct Module for enum Call where origin: T::Origin {} +/// # pub struct Module for enum Call where origin: T::Origin {} /// # } /// # /// pub type Loc = (i64, i64, i64); @@ -98,18 +96,12 @@ pub struct StorageQuery { impl StorageQuery { /// Create a storage query for a StorageValue. pub fn value>() -> Self { - Self { - key: StorageKey(St::storage_value_final_key().to_vec()), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_value_final_key().to_vec()), _spook: PhantomData } } /// Create a storage query for a value in a StorageMap. pub fn map, K: FullEncode>(key: K) -> Self { - Self { - key: StorageKey(St::storage_map_final_key(key)), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_map_final_key(key)), _spook: PhantomData } } /// Create a storage query for a value in a StorageDoubleMap. 
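// ---- Editorial aside (not part of the patch) ----
// The `StorageQuery` constructors condensed above, in use. A sketch:
// `ExampleValue` and `ExampleMap` are hypothetical storage generator types
// (normally produced by a pallet's storage macros) with value type `u32`
// and key type `u64`.
fn build_queries() {
	// Whole-value query for a `StorageValue<u32>` generator:
	let _whole_value = StorageQuery::<u32>::value::<ExampleValue>();
	// Keyed query for a `StorageMap<u64, u32>` generator:
	let _one_entry = StorageQuery::<u32>::map::<ExampleMap, _>(7u64);
}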
@@ -117,10 +109,7 @@ impl StorageQuery { key1: K1, key2: K2, ) -> Self { - Self { - key: StorageKey(St::storage_double_map_final_key(key1, key2)), - _spook: PhantomData, - } + Self { key: StorageKey(St::storage_double_map_final_key(key1, key2)), _spook: PhantomData } } /// Send this query over RPC, await the typed result. diff --git a/substrate/utils/frame/rpc/system/src/lib.rs b/substrate/utils/frame/rpc/system/src/lib.rs index e80d457de98dd1b6aadc49a353c9cde986b5d6d0..64c25157dbe216c15d1438f30283ee5bcb502ff7 100644 --- a/substrate/utils/frame/rpc/system/src/lib.rs +++ b/substrate/utils/frame/rpc/system/src/lib.rs @@ -20,28 +20,22 @@ use std::sync::Arc; use codec::{self, Codec, Decode, Encode}; -use sc_client_api::light::{future_header, RemoteBlockchain, Fetcher, RemoteCallRequest}; +use futures::future::{ready, TryFutureExt}; use jsonrpc_core::{ + futures::future::{self as rpc_future, result, Future}, Error as RpcError, ErrorCode, - futures::future::{self as rpc_future,result, Future}, }; use jsonrpc_derive::rpc; -use futures::future::{ready, TryFutureExt}; -use sp_blockchain::{ - HeaderBackend, - Error as ClientError -}; -use sp_runtime::{ - generic::BlockId, - traits, -}; -use sp_core::{hexdisplay::HexDisplay, Bytes}; -use sc_transaction_pool_api::{TransactionPool, InPoolTransaction}; -use sp_block_builder::BlockBuilder; +use sc_client_api::light::{future_header, Fetcher, RemoteBlockchain, RemoteCallRequest}; use sc_rpc_api::DenyUnsafe; +use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_core::{hexdisplay::HexDisplay, Bytes}; +use sp_runtime::{generic::BlockId, traits}; -pub use frame_system_rpc_runtime_api::AccountNonceApi; pub use self::gen_client::Client as SystemClient; +pub use frame_system_rpc_runtime_api::AccountNonceApi; /// Future that resolves to account nonce. pub type FutureResult = Box + Send>; @@ -89,13 +83,8 @@ pub struct FullSystem { impl FullSystem { /// Create new `FullSystem` given client and transaction pool. - pub fn new(client: Arc, pool: Arc

<P>, deny_unsafe: DenyUnsafe,) -> Self { - FullSystem { - client, - pool, - deny_unsafe, - _marker: Default::default(), - } + pub fn new(client: Arc<C>, pool: Arc<P>

, deny_unsafe: DenyUnsafe) -> Self { + FullSystem { client, pool, deny_unsafe, _marker: Default::default() } } } @@ -130,35 +119,37 @@ where Box::new(result(get_nonce())) } - fn dry_run(&self, extrinsic: Bytes, at: Option<::Hash>) -> FutureResult { + fn dry_run( + &self, + extrinsic: Bytes, + at: Option<::Hash>, + ) -> FutureResult { if let Err(err) = self.deny_unsafe.check_if_safe() { - return Box::new(rpc_future::err(err.into())); + return Box::new(rpc_future::err(err.into())) } let dry_run = || { let api = self.client.runtime_api(); let at = BlockId::::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. - self.client.info().best_hash - )); - - let uxt: ::Extrinsic = Decode::decode(&mut &*extrinsic).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(format!("{:?}", e).into()), - })?; + self.client.info().best_hash)); - let result = api.apply_extrinsic(&at, uxt) + let uxt: ::Extrinsic = Decode::decode(&mut &*extrinsic) .map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), + code: ErrorCode::ServerError(Error::DecodeError.into()), message: "Unable to dry run extrinsic.".into(), data: Some(format!("{:?}", e).into()), })?; + let result = api.apply_extrinsic(&at, uxt).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to dry run extrinsic.".into(), + data: Some(format!("{:?}", e).into()), + })?; + Ok(Encode::encode(&result).into()) }; - Box::new(result(dry_run())) } } @@ -179,12 +170,7 @@ impl LightSystem { fetcher: Arc, pool: Arc

, ) -> Self { - LightSystem { - client, - remote_blockchain, - fetcher, - pool, - } + LightSystem { client, remote_blockchain, fetcher, pool } } } @@ -205,21 +191,27 @@ where let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); let fetcher = self.fetcher.clone(); let call_data = account.encode(); - let future_best_header = future_best_header - .and_then(move |maybe_best_header| ready( - maybe_best_header.ok_or_else(|| { ClientError::UnknownBlock(format!("{}", best_hash)) }) - )); - let future_nonce = future_best_header.and_then(move |best_header| - fetcher.remote_call(RemoteCallRequest { - block: best_hash, - header: best_header, - method: "AccountNonceApi_account_nonce".into(), - call_data, - retry_count: None, + let future_best_header = future_best_header.and_then(move |maybe_best_header| { + ready( + maybe_best_header + .ok_or_else(|| ClientError::UnknownBlock(format!("{}", best_hash))), + ) + }); + let future_nonce = future_best_header + .and_then(move |best_header| { + fetcher.remote_call(RemoteCallRequest { + block: best_hash, + header: best_header, + method: "AccountNonceApi_account_nonce".into(), + call_data, + retry_count: None, + }) }) - ).compat(); - let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) - .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); + .compat(); + let future_nonce = future_nonce.and_then(|nonce| { + Decode::decode(&mut &nonce[..]) + .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e)) + }); let future_nonce = future_nonce.map_err(|e| RpcError { code: ErrorCode::ServerError(Error::RuntimeError.into()), message: "Unable to query nonce.".into(), @@ -232,7 +224,11 @@ where Box::new(future_nonce) } - fn dry_run(&self, _extrinsic: Bytes, _at: Option<::Hash>) -> FutureResult { + fn dry_run( + &self, + _extrinsic: Bytes, + _at: Option<::Hash>, + ) -> FutureResult { Box::new(result(Err(RpcError { code: ErrorCode::MethodNotFound, message: "Unable to dry run extrinsic.".into(), @@ -243,11 +239,8 @@ where /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
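// ---- Editorial aside (not part of the patch) ----
// The intent behind `adjust_nonce` below, reduced to plain data: starting
// from the on-chain nonce, keep bumping while the pool's ready queue already
// provides that nonce. (The real function walks `pool.ready()` and matches
// `provides` tags; this standalone sketch only illustrates the loop.)
fn next_free_nonce(onchain_nonce: u64, ready_pool_nonces: &[u64]) -> u64 {
	let mut nonce = onchain_nonce;
	while ready_pool_nonces.contains(&nonce) {
		nonce += 1;
	}
	nonce
}
// next_free_nonce(5, &[5, 6]) == 7: the new tx queues after both ready txs.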
-fn adjust_nonce( - pool: &P, - account: AccountId, - nonce: Index, -) -> Index where +fn adjust_nonce(pool: &P, account: AccountId, nonce: Index) -> Index +where P: TransactionPool, AccountId: Clone + std::fmt::Display + Encode, Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, @@ -285,9 +278,12 @@ mod tests { use super::*; use futures::executor::block_on; - use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; use sc_transaction_pool::BasicPool; - use sp_runtime::{ApplyExtrinsicResult, transaction_validity::{TransactionValidityError, InvalidTransaction}}; + use sp_runtime::{ + transaction_validity::{InvalidTransaction, TransactionValidityError}, + ApplyExtrinsicResult, + }; + use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; #[test] fn should_return_next_nonce_for_some_account() { @@ -296,13 +292,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let source = sp_runtime::transaction_validity::TransactionSource::External; let new_transaction = |nonce: u64| { @@ -336,13 +327,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); @@ -360,13 +346,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -375,7 +356,8 @@ mod tests { to: AccountKeyring::Bob.into(), amount: 5, nonce: 0, - }.into_signed_tx(); + } + .into_signed_tx(); // when let res = accounts.dry_run(tx.encode().into(), None); @@ -393,13 +375,8 @@ mod tests { // given let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); - let pool = BasicPool::new_full( - Default::default(), - true.into(), - None, - spawner, - client.clone(), - ); + let pool = + BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); let accounts = FullSystem::new(client, pool, DenyUnsafe::No); @@ -408,7 +385,8 @@ mod tests { to: AccountKeyring::Bob.into(), amount: 5, nonce: 100, - }.into_signed_tx(); + } + .into_signed_tx(); // when let res = accounts.dry_run(tx.encode().into(), None); diff --git a/substrate/utils/frame/try-runtime/cli/src/lib.rs b/substrate/utils/frame/try-runtime/cli/src/lib.rs index e0d09ff7fbcf48c8d34dcd234d7d0d583015ac20..4f31bd741b3a0fbe7bda603fc1180cb49c3bf04d 100644 --- a/substrate/utils/frame/try-runtime/cli/src/lib.rs +++ b/substrate/utils/frame/try-runtime/cli/src/lib.rs @@ -18,24 +18,23 @@ //! `Structopt`-ready structs for `try-runtime`. 
use parity_scale_codec::{Decode, Encode}; -use std::{fmt::Debug, path::PathBuf, str::FromStr, sync::Arc}; -use sc_service::Configuration; +use remote_externalities::{rpc_api, Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig}; +use sc_chain_spec::ChainSpec; use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; use sc_executor::NativeExecutor; -use sc_service::NativeExecutionDispatch; -use sc_chain_spec::ChainSpec; -use sp_state_machine::StateMachine; -use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; +use sc_service::{Configuration, NativeExecutionDispatch}; use sp_core::{ + hashing::twox_128, offchain::{ - OffchainWorkerExt, OffchainDbExt, TransactionPoolExt, testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - storage::{StorageData, StorageKey, well_known_keys}, - hashing::twox_128, + storage::{well_known_keys, StorageData, StorageKey}, }; -use sp_keystore::{KeystoreExt, testing::KeyStore}; -use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig, rpc_api}; +use sp_keystore::{testing::KeyStore, KeystoreExt}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_state_machine::StateMachine; +use std::{fmt::Debug, path::PathBuf, str::FromStr, sync::Arc}; mod parse; @@ -170,7 +169,7 @@ pub enum State { /// The modules to scrape. If empty, entire chain state will be scraped. #[structopt(short, long, require_delimiter = true)] modules: Option>, - } + }, } async fn on_runtime_upgrade( @@ -192,36 +191,31 @@ where let mut changes = Default::default(); let max_runtime_instances = config.max_runtime_instances; - let executor = NativeExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); + let executor = + NativeExecutor::::new(wasm_method.into(), heap_pages, max_runtime_instances); let ext = { let builder = match command.state { - State::Snap { snapshot_path } => { + State::Snap { snapshot_path } => Builder::::new().mode(Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path), - })) - }, - State::Live { - snapshot_path, - modules - } => Builder::::new().mode(Mode::Online(OnlineConfig { - transport: shared.url.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - modules: modules.to_owned().unwrap_or_default(), - at: Some(shared.block_at::()?), - ..Default::default() - })), + })), + State::Live { snapshot_path, modules } => + Builder::::new().mode(Mode::Online(OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(shared.block_at::()?), + ..Default::default() + })), }; let (code_key, code) = extract_code(config.chain_spec)?; builder .inject_key_value(&[(code_key, code)]) .inject_hashed_key(&[twox_128(b"System"), twox_128(b"LastRuntimeUpgrade")].concat()) - .build().await? + .build() + .await? 
}; let encoded_result = StateMachine::<_, _, NumberFor, _>::new( @@ -232,8 +226,7 @@ where "TryRuntime_on_runtime_upgrade", &[], ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) - .runtime_code()?, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, sp_core::testing::TaskExecutor::new(), ) .execute(execution.into()) @@ -271,35 +264,28 @@ where let mut changes = Default::default(); let max_runtime_instances = config.max_runtime_instances; - let executor = NativeExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); + let executor = + NativeExecutor::::new(wasm_method.into(), heap_pages, max_runtime_instances); let mode = match command.state { - State::Live { - snapshot_path, - modules - } => { - let at = shared.block_at::()?; - let online_config = OnlineConfig { - transport: shared.url.to_owned().into(), - state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), - modules: modules.to_owned().unwrap_or_default(), - at: Some(at), - ..Default::default() - }; + State::Live { snapshot_path, modules } => { + let at = shared.block_at::()?; + let online_config = OnlineConfig { + transport: shared.url.to_owned().into(), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), + modules: modules.to_owned().unwrap_or_default(), + at: Some(at), + ..Default::default() + }; - Mode::Online(online_config) - }, - State::Snap { snapshot_path } => { - let mode = Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new(snapshot_path), - }); + Mode::Online(online_config) + }, + State::Snap { snapshot_path } => { + let mode = + Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); - mode - } + mode + }, }; let builder = Builder::::new() .mode(mode) @@ -308,10 +294,7 @@ where let (code_key, code) = extract_code(config.chain_spec)?; builder.inject_key_value(&[(code_key, code)]).build().await? } else { - builder - .inject_hashed_key(well_known_keys::CODE) - .build() - .await? + builder.inject_hashed_key(well_known_keys::CODE).build().await? }; let (offchain, _offchain_state) = TestOffchainExt::new(); @@ -332,8 +315,7 @@ where "OffchainWorkerApi_offchain_worker", header.encode().as_ref(), ext.extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) - .runtime_code()?, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend).runtime_code()?, sp_core::testing::TaskExecutor::new(), ) .execute(execution.into()) @@ -363,20 +345,16 @@ where let mut changes = Default::default(); let max_runtime_instances = config.max_runtime_instances; - let executor = NativeExecutor::::new( - wasm_method.into(), - heap_pages, - max_runtime_instances, - ); + let executor = + NativeExecutor::::new(wasm_method.into(), heap_pages, max_runtime_instances); let block_hash = shared.block_at::()?; let block: Block = rpc_api::get_block::(shared.url.clone(), block_hash).await?; let mode = match command.state { State::Snap { snapshot_path } => { - let mode = Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new(snapshot_path), - }); + let mode = + Mode::Offline(OfflineConfig { state_snapshot: SnapshotConfig::new(snapshot_path) }); mode }, @@ -392,7 +370,7 @@ where }); mode - } + }, }; let ext = { @@ -403,10 +381,7 @@ where let (code_key, code) = extract_code(config.chain_spec)?; builder.inject_key_value(&[(code_key, code)]).build().await? } else { - builder - .inject_hashed_key(well_known_keys::CODE) - .build() - .await? 
+ builder.inject_hashed_key(well_known_keys::CODE).build().await? }; // register externality extensions in order to provide host interface for OCW to the @@ -459,15 +434,14 @@ impl TryRuntimeCmd { ExecDispatch: NativeExecutionDispatch + 'static, { match &self.command { - Command::OnRuntimeUpgrade(ref cmd) => { - on_runtime_upgrade::(self.shared.clone(), cmd.clone(), config).await - } - Command::OffchainWorker(cmd) => { - offchain_worker::(self.shared.clone(), cmd.clone(), config).await - } - Command::ExecuteBlock(cmd) => { - execute_block::(self.shared.clone(), cmd.clone(), config).await - } + Command::OnRuntimeUpgrade(ref cmd) => + on_runtime_upgrade::(self.shared.clone(), cmd.clone(), config) + .await, + Command::OffchainWorker(cmd) => + offchain_worker::(self.shared.clone(), cmd.clone(), config) + .await, + Command::ExecuteBlock(cmd) => + execute_block::(self.shared.clone(), cmd.clone(), config).await, } } } diff --git a/substrate/utils/frame/try-runtime/cli/src/parse.rs b/substrate/utils/frame/try-runtime/cli/src/parse.rs index beb9a6508fed1f55dc839c4d949bdec1a3daa7d8..7f205fbacd3104b9b0a1f1c246034ff5699d90bc 100644 --- a/substrate/utils/frame/try-runtime/cli/src/parse.rs +++ b/substrate/utils/frame/try-runtime/cli/src/parse.rs @@ -18,11 +18,8 @@ //! Utils for parsing user input pub(crate) fn hash(block_hash: &str) -> Result { - let (block_hash, offset) = if block_hash.starts_with("0x") { - (&block_hash[2..], 2) - } else { - (block_hash, 0) - }; + let (block_hash, offset) = + if block_hash.starts_with("0x") { (&block_hash[2..], 2) } else { (block_hash, 0) }; if let Some(pos) = block_hash.chars().position(|c| !c.is_ascii_hexdigit()) { Err(format!( diff --git a/substrate/utils/prometheus/src/lib.rs b/substrate/utils/prometheus/src/lib.rs index 93a56d084fd044e4d98e2f0fbb506d266d4959e7..96407b00623568523dc42bec6029eb5dd1ddc409 100644 --- a/substrate/utils/prometheus/src/lib.rs +++ b/substrate/utils/prometheus/src/lib.rs @@ -15,33 +15,34 @@ // See the License for the specific language governing permissions and // limitations under the License. 
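// ---- Editorial aside (not part of the patch) ----
// The validation performed by `parse::hash` above, restated standalone:
// strip an optional `0x`, then reject the first non-hex character,
// reporting its position in the original input.
fn check_hash_str(s: &str) -> Result<(), String> {
	let (hex, offset) = if let Some(rest) = s.strip_prefix("0x") { (rest, 2) } else { (s, 0) };
	match hex.chars().position(|c| !c.is_ascii_hexdigit()) {
		Some(pos) =>
			Err(format!("invalid hash: non-hex character at position {}", pos + offset)),
		None => Ok(()),
	}
}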
-use futures_util::{FutureExt, future::Future}; +use futures_util::{future::Future, FutureExt}; pub use prometheus::{ self, - Registry, Error as PrometheusError, Opts, - Histogram, HistogramOpts, HistogramVec, - exponential_buckets, core::{ - GenericGauge as Gauge, GenericCounter as Counter, - GenericGaugeVec as GaugeVec, GenericCounterVec as CounterVec, - AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, - } + AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, GenericCounter as Counter, + GenericCounterVec as CounterVec, GenericGauge as Gauge, GenericGaugeVec as GaugeVec, + }, + exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, + Registry, }; -use prometheus::{Encoder, TextEncoder, core::Collector}; +use prometheus::{core::Collector, Encoder, TextEncoder}; use std::net::SocketAddr; #[cfg(not(target_os = "unknown"))] mod networking; mod sourced; -pub use sourced::{SourcedCounter, SourcedGauge, MetricSource, SourcedMetric}; +pub use sourced::{MetricSource, SourcedCounter, SourcedGauge, SourcedMetric}; -#[cfg(target_os = "unknown")] -pub use unknown_os::init_prometheus; #[cfg(not(target_os = "unknown"))] pub use known_os::init_prometheus; +#[cfg(target_os = "unknown")] +pub use unknown_os::init_prometheus; -pub fn register(metric: T, registry: &Registry) -> Result { +pub fn register( + metric: T, + registry: &Registry, +) -> Result { registry.register(Box::new(metric.clone()))?; Ok(metric) } @@ -61,8 +62,11 @@ mod unknown_os { #[cfg(not(target_os = "unknown"))] mod known_os { use super::*; - use hyper::http::StatusCode; - use hyper::{Server, Body, Request, Response, service::{service_fn, make_service_fn}}; + use hyper::{ + http::StatusCode, + service::{make_service_fn, service_fn}, + Body, Request, Response, Server, + }; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { @@ -73,7 +77,7 @@ mod known_os { /// i/o error. Io(std::io::Error), #[display(fmt = "Prometheus port {} already in use.", _0)] - PortInUse(SocketAddr) + PortInUse(SocketAddr), } impl std::error::Error for Error { @@ -82,28 +86,32 @@ mod known_os { Error::Hyper(error) => Some(error), Error::Http(error) => Some(error), Error::Io(error) => Some(error), - Error::PortInUse(_) => None + Error::PortInUse(_) => None, } } } - async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { + async fn request_metrics( + req: Request, + registry: Registry, + ) -> Result, Error> { if req.uri().path() == "/metrics" { let metric_families = registry.gather(); let mut buffer = vec![]; let encoder = TextEncoder::new(); encoder.encode(&metric_families, &mut buffer).unwrap(); - Response::builder().status(StatusCode::OK) + Response::builder() + .status(StatusCode::OK) .header("Content-Type", encoder.format_type()) .body(Body::from(buffer)) .map_err(Error::Http) } else { - Response::builder().status(StatusCode::NOT_FOUND) + Response::builder() + .status(StatusCode::NOT_FOUND) .body(Body::from("Not found.")) .map_err(Error::Http) } - } #[derive(Clone)] @@ -121,7 +129,10 @@ mod known_os { /// Initializes the metrics context, and starts an HTTP server /// to serve metrics. 
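// ---- Editorial aside (not part of the patch) ----
// Using the `register` helper whose signature is expanded above, with this
// module's re-exports (`Counter`/`U64` alias prometheus's generic types).
// A sketch; the metric name and help text are arbitrary.
fn make_counter(registry: &Registry) -> Result<Counter<U64>, PrometheusError> {
	let imported = Counter::<U64>::new("blocks_imported_total", "Total imported blocks")?;
	// `register` clones the metric into the registry and hands it back.
	register(imported, registry)
}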
- pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error>{ + pub async fn init_prometheus( + prometheus_addr: SocketAddr, + registry: Registry, + ) -> Result<(), Error> { use networking::Incoming; let listener = async_std::net::TcpListener::bind(&prometheus_addr) .await diff --git a/substrate/utils/prometheus/src/networking.rs b/substrate/utils/prometheus/src/networking.rs index 48ae8a23297c9ef5e90bf51b34e01b10a84991b5..e04ac99a5694870063a1adae994d2beec17edb0c 100644 --- a/substrate/utils/prometheus/src/networking.rs +++ b/substrate/utils/prometheus/src/networking.rs @@ -16,8 +16,11 @@ // limitations under the License. use async_std::pin::Pin; -use std::task::{Poll, Context}; -use futures_util::{stream::Stream, io::{AsyncRead, AsyncWrite}}; +use futures_util::{ + io::{AsyncRead, AsyncWrite}, + stream::Stream, +}; +use std::task::{Context, Poll}; pub struct Incoming<'a>(pub async_std::net::Incoming<'a>); @@ -25,7 +28,10 @@ impl hyper::server::accept::Accept for Incoming<'_> { type Conn = TcpStream; type Error = async_std::io::Error; - fn poll_accept(self: Pin<&mut Self>, cx: &mut Context) -> Poll>> { + fn poll_accept( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll>> { Pin::new(&mut Pin::into_inner(self).0) .poll_next(cx) .map(|opt| opt.map(|res| res.map(TcpStream))) @@ -38,10 +44,9 @@ impl tokio::io::AsyncRead for TcpStream { fn poll_read( self: Pin<&mut Self>, cx: &mut Context, - buf: &mut [u8] + buf: &mut [u8], ) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_read(cx, buf) + Pin::new(&mut Pin::into_inner(self).0).poll_read(cx, buf) } } @@ -49,19 +54,16 @@ impl tokio::io::AsyncWrite for TcpStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context, - buf: &[u8] + buf: &[u8], ) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_write(cx, buf) + Pin::new(&mut Pin::into_inner(self).0).poll_write(cx, buf) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_flush(cx) + Pin::new(&mut Pin::into_inner(self).0).poll_flush(cx) } fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_close(cx) + Pin::new(&mut Pin::into_inner(self).0).poll_close(cx) } } diff --git a/substrate/utils/prometheus/src/sourced.rs b/substrate/utils/prometheus/src/sourced.rs index 014bdb30f8ab76821c1ceed7fc21cb97a1005e6e..78853a6ef354fbee7995a2e72d0107f2a7b5f510 100644 --- a/substrate/utils/prometheus/src/sourced.rs +++ b/substrate/utils/prometheus/src/sourced.rs @@ -17,8 +17,10 @@ //! Metrics that are collected from existing sources. -use prometheus::core::{Collector, Desc, Describer, Number, Opts}; -use prometheus::proto; +use prometheus::{ + core::{Collector, Desc, Describer, Number, Opts}, + proto, +}; use std::{cmp::Ordering, marker::PhantomData}; /// A counter whose values are obtained from an existing source. 
@@ -80,15 +82,15 @@ impl Collector for SourcedMetric { let mut c = proto::Counter::default(); c.set_value(value.into_f64()); m.set_counter(c); - } + }, proto::MetricType::GAUGE => { let mut g = proto::Gauge::default(); g.set_value(value.into_f64()); m.set_gauge(g); - } + }, t => { log::error!("Unsupported sourced metric type: {:?}", t); - } + }, } debug_assert_eq!(self.desc.variable_labels.len(), label_values.len()); @@ -97,18 +99,23 @@ impl Collector for SourcedMetric { log::warn!("Missing label values for sourced metric {}", self.desc.fq_name), Ordering::Less => log::warn!("Too many label values for sourced metric {}", self.desc.fq_name), - Ordering::Equal => {} + Ordering::Equal => {}, } - m.set_label(self.desc.variable_labels.iter().zip(label_values) - .map(|(l_name, l_value)| { - let mut l = proto::LabelPair::default(); - l.set_name(l_name.to_string()); - l.set_value(l_value.to_string()); - l - }) - .chain(self.desc.const_label_pairs.iter().cloned()) - .collect::>()); + m.set_label( + self.desc + .variable_labels + .iter() + .zip(label_values) + .map(|(l_name, l_value)| { + let mut l = proto::LabelPair::default(); + l.set_name(l_name.to_string()); + l.set_value(l_value.to_string()); + l + }) + .chain(self.desc.const_label_pairs.iter().cloned()) + .collect::>(), + ); counters.push(m); }); @@ -130,11 +137,15 @@ pub trait SourcedType: private::Sealed + Sync + Send { } impl SourcedType for Counter { - fn proto() -> proto::MetricType { proto::MetricType::COUNTER } + fn proto() -> proto::MetricType { + proto::MetricType::COUNTER + } } impl SourcedType for Gauge { - fn proto() -> proto::MetricType { proto::MetricType::GAUGE } + fn proto() -> proto::MetricType { + proto::MetricType::GAUGE + } } mod private { diff --git a/substrate/utils/wasm-builder/src/builder.rs b/substrate/utils/wasm-builder/src/builder.rs index 9e8216f04fedbc4b74c02b7c0822bb61e12dbada..20f33583b89203e1ba19c3ccc462a4463210bd3a 100644 --- a/substrate/utils/wasm-builder/src/builder.rs +++ b/substrate/utils/wasm-builder/src/builder.rs @@ -15,7 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::{env, path::{PathBuf, Path}, process}; +use std::{ + env, + path::{Path, PathBuf}, + process, +}; /// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. fn get_manifest_dir() -> PathBuf { @@ -50,10 +54,7 @@ impl WasmBuilderSelectProject { /// Use the given `path` as project for building the WASM binary. /// /// Returns an error if the given `path` does not points to a `Cargo.toml`. - pub fn with_project( - self, - path: impl Into, - ) -> Result { + pub fn with_project(self, path: impl Into) -> Result { let path = path.into(); if path.ends_with("Cargo.toml") && path.exists() { @@ -97,9 +98,7 @@ pub struct WasmBuilder { impl WasmBuilder { /// Create a new instance of the builder. pub fn new() -> WasmBuilderSelectProject { - WasmBuilderSelectProject { - _ignore: (), - } + WasmBuilderSelectProject { _ignore: () } } /// Enable exporting `__heap_base` as global variable in the WASM binary. @@ -147,9 +146,8 @@ impl WasmBuilder { /// Build the WASM binary. 
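// ---- Editorial aside (not part of the patch) ----
// The `build.rs` shape this builder targets. A sketch: `with_current_project`
// and `import_memory` are assumed to sit on the same API surface as the
// `with_project` and `export_heap_base` methods shown in this file.
fn main() {
	substrate_wasm_builder::WasmBuilder::new()
		// Pick the crate whose `Cargo.toml` drives this build script.
		.with_current_project()
		// Export `__heap_base`, as enabled by the option documented above.
		.export_heap_base()
		.import_memory()
		.build()
}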
pub fn build(self) { let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join( - self.file_name.clone().unwrap_or_else(|| "wasm_binary.rs".into()), - ); + let file_path = + out_dir.join(self.file_name.clone().unwrap_or_else(|| "wasm_binary.rs".into())); if check_skip_build() { // If we skip the build, we still want to make sure to be called when an env variable @@ -158,7 +156,7 @@ impl WasmBuilder { provide_dummy_wasm_binary_if_not_exist(&file_path); - return; + return } build_project( @@ -179,13 +177,17 @@ impl WasmBuilder { fn generate_crate_skip_build_env_name() -> String { format!( "SKIP_{}_WASM_BUILD", - env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), + env::var("CARGO_PKG_NAME") + .expect("Package name is set") + .to_uppercase() + .replace('-', "_"), ) } /// Checks if the build of the WASM binary should be skipped. fn check_skip_build() -> bool { - env::var(crate::SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() + env::var(crate::SKIP_BUILD_ENV).is_ok() || + env::var(generate_crate_skip_build_env_name()).is_ok() } /// Provide a dummy WASM binary if there doesn't exist one. @@ -243,15 +245,9 @@ fn build_project( ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { - ( - wasm_binary.wasm_binary_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) + (wasm_binary.wasm_binary_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped()) } else { - ( - bloaty.wasm_binary_bloaty_path_escaped(), - bloaty.wasm_binary_bloaty_path_escaped(), - ) + (bloaty.wasm_binary_bloaty_path_escaped(), bloaty.wasm_binary_bloaty_path_escaped()) }; crate::write_file_if_changed( diff --git a/substrate/utils/wasm-builder/src/lib.rs b/substrate/utils/wasm-builder/src/lib.rs index 0a3c856344dcdca7fda69bc002a575dff5df2fff..0bfd4e7550146f16e364bc0123b9661b674f911b 100644 --- a/substrate/utils/wasm-builder/src/lib.rs +++ b/substrate/utils/wasm-builder/src/lib.rs @@ -96,7 +96,12 @@ //! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, //! the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. 
-use std::{env, fs, path::{PathBuf, Path}, process::Command, io::BufRead}; +use std::{ + env, fs, + io::BufRead, + path::{Path, PathBuf}, + process::Command, +}; mod builder; mod prerequisites; @@ -144,18 +149,16 @@ fn copy_file_if_changed(src: PathBuf, dst: PathBuf) { let dst_file = fs::read_to_string(&dst).ok(); if src_file != dst_file { - fs::copy(&src, &dst) - .unwrap_or_else( - |_| panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()) - ); + fs::copy(&src, &dst).unwrap_or_else(|_| { + panic!("Copying `{}` to `{}` can not fail; qed", src.display(), dst.display()) + }); } } /// Get a cargo command that compiles with nightly fn get_nightly_cargo() -> CargoCommand { - let env_cargo = CargoCommand::new( - &env::var("CARGO").expect("`CARGO` env variable is always set by cargo"), - ); + let env_cargo = + CargoCommand::new(&env::var("CARGO").expect("`CARGO` env variable is always set by cargo")); let default_cargo = CargoCommand::new("cargo"); let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); @@ -197,7 +200,7 @@ fn get_rustup_nightly(selected: Option) -> Option { } latest_nightly?.trim_end_matches(&host).into() - } + }, }; Some(CargoCommand::new_with_args("rustup", &["run", &version, "cargo"])) @@ -253,10 +256,7 @@ struct CargoCommandVersioned { impl CargoCommandVersioned { fn new(command: CargoCommand, version: String) -> Self { - Self { - command, - version, - } + Self { command, version } } /// Returns the `rustc` version. diff --git a/substrate/utils/wasm-builder/src/prerequisites.rs b/substrate/utils/wasm-builder/src/prerequisites.rs index dbbd9c0a56229fb8ea8bd7fa694911ec48b3e89e..0dad8b781ae5af7ce949109df3eab4ef5fa75527 100644 --- a/substrate/utils/wasm-builder/src/prerequisites.rs +++ b/substrate/utils/wasm-builder/src/prerequisites.rs @@ -15,12 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{CargoCommandVersioned, CargoCommand, write_file_if_changed}; +use crate::{write_file_if_changed, CargoCommand, CargoCommandVersioned}; use std::{fs, path::Path}; -use tempfile::tempdir; use ansi_term::Color; +use tempfile::tempdir; /// Print an error message. fn print_error_message(message: &str) -> String { @@ -95,7 +95,7 @@ fn create_check_toolchain_project(project_dir: &Path) { rustc_version.unwrap_or_else(|| "unknown rustc version".into()), ); } - "# + "#, ); // Just prints the `RURSTC_VERSION` environment variable that is being created by the // `build.rs` script. 
@@ -105,7 +105,7 @@ fn create_check_toolchain_project(project_dir: &Path) { fn main() { println!("{}", env!("RUSTC_VERSION")); } - "# + "#, ); } @@ -120,7 +120,12 @@ fn check_wasm_toolchain_installed( let manifest_path = temp.path().join("Cargo.toml").display().to_string(); let mut build_cmd = cargo_command.command(); - build_cmd.args(&["build", "--target=wasm32-unknown-unknown", "--manifest-path", &manifest_path]); + build_cmd.args(&[ + "build", + "--target=wasm32-unknown-unknown", + "--manifest-path", + &manifest_path, + ]); if super::color_output_enabled() { build_cmd.arg("--color=always"); @@ -133,33 +138,27 @@ fn check_wasm_toolchain_installed( build_cmd.env_remove("CARGO_TARGET_DIR"); run_cmd.env_remove("CARGO_TARGET_DIR"); - build_cmd - .output() - .map_err(|_| err_msg.clone()) - .and_then(|s| - if s.status.success() { - let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); - Ok(CargoCommandVersioned::new( - cargo_command, - version.unwrap_or_else(|| "unknown rustc version".into()), - )) - } else { - match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => { - Err(print_error_message("`rust-lld` not found, please install it!")) - }, - Ok(ref err) => Err( - format!( - "{}\n\n{}\n{}\n{}{}\n", - err_msg, - Color::Yellow.bold().paint("Further error information:"), - Color::Yellow.bold().paint("-".repeat(60)), - err, - Color::Yellow.bold().paint("-".repeat(60)), - ) - ), - Err(_) => Err(err_msg), - } + build_cmd.output().map_err(|_| err_msg.clone()).and_then(|s| { + if s.status.success() { + let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); + Ok(CargoCommandVersioned::new( + cargo_command, + version.unwrap_or_else(|| "unknown rustc version".into()), + )) + } else { + match String::from_utf8(s.stderr) { + Ok(ref err) if err.contains("linker `rust-lld` not found") => + Err(print_error_message("`rust-lld` not found, please install it!")), + Ok(ref err) => Err(format!( + "{}\n\n{}\n{}\n{}{}\n", + err_msg, + Color::Yellow.bold().paint("Further error information:"), + Color::Yellow.bold().paint("-".repeat(60)), + err, + Color::Yellow.bold().paint("-".repeat(60)), + )), + Err(_) => Err(err_msg), } - ) + } + }) } diff --git a/substrate/utils/wasm-builder/src/wasm_project.rs b/substrate/utils/wasm-builder/src/wasm_project.rs index 466c2145e6ceedf98fc69a3e0b6182db2b4a48ea..60b0d76fd0c9339ef378540930752010f46a3f7b 100644 --- a/substrate/utils/wasm-builder/src/wasm_project.rs +++ b/substrate/utils/wasm-builder/src/wasm_project.rs @@ -18,15 +18,20 @@ use crate::{write_file_if_changed, CargoCommandVersioned}; use std::{ - fs, path::{Path, PathBuf}, borrow::ToOwned, process, env, collections::HashSet, - hash::{Hash, Hasher}, ops::Deref, + borrow::ToOwned, + collections::HashSet, + env, fs, + hash::{Hash, Hasher}, + ops::Deref, + path::{Path, PathBuf}, + process, }; use toml::value::Table; use build_helper::rerun_if_changed; -use cargo_metadata::{MetadataCommand, Metadata}; +use cargo_metadata::{Metadata, MetadataCommand}; use walkdir::WalkDir; @@ -114,19 +119,16 @@ pub(crate) fn create_and_compile( ); build_project(&project, default_rustflags, cargo_cmd); - let (wasm_binary, wasm_binary_compressed, bloaty) = compact_wasm_file( - &project, - project_cargo_toml, - wasm_binary_name, - ); + let (wasm_binary, wasm_binary_compressed, bloaty) = + compact_wasm_file(&project, project_cargo_toml, wasm_binary_name); - wasm_binary.as_ref().map(|wasm_binary| - copy_wasm_to_target_directory(project_cargo_toml, 
wasm_binary) - ); + wasm_binary + .as_ref() + .map(|wasm_binary| copy_wasm_to_target_directory(project_cargo_toml, wasm_binary)); - wasm_binary_compressed.as_ref().map(|wasm_binary_compressed| + wasm_binary_compressed.as_ref().map(|wasm_binary_compressed| { copy_wasm_to_target_directory(project_cargo_toml, wasm_binary_compressed) - ); + }); generate_rerun_if_changed_instructions(project_cargo_toml, &project, &wasm_workspace); @@ -144,17 +146,17 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { } if !path.pop() { - return None; + return None } } } if let Some(path) = find_impl(build_helper::out_dir()) { - return Some(path); + return Some(path) } if let Some(path) = find_impl(cargo_manifest.to_path_buf()) { - return Some(path); + return Some(path) } build_helper::warning!( @@ -169,15 +171,20 @@ fn find_cargo_lock(cargo_manifest: &Path) -> Option { /// Extract the crate name from the given `Cargo.toml`. fn get_crate_name(cargo_manifest: &Path) -> String { let cargo_toml: Table = toml::from_str( - &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed") - ).expect("Cargo manifest is a valid toml file; qed"); + &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"), + ) + .expect("Cargo manifest is a valid toml file; qed"); let package = cargo_toml .get("package") .and_then(|t| t.as_table()) .expect("`package` key exists in valid `Cargo.toml`; qed"); - package.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned).expect("Package name exists; qed") + package + .get("name") + .and_then(|p| p.as_str()) + .map(ToOwned::to_owned) + .expect("Package name exists; qed") } /// Returns the name for the wasm binary. @@ -192,9 +199,10 @@ fn get_wasm_workspace_root() -> PathBuf { loop { match out_dir.parent() { Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(), - _ => if !out_dir.pop() { - break; - } + _ => + if !out_dir.pop() { + break + }, } } @@ -210,10 +218,10 @@ fn create_project_cargo_toml( enabled_features: impl Iterator, ) { let mut workspace_toml: Table = toml::from_str( - &fs::read_to_string( - workspace_root_path.join("Cargo.toml"), - ).expect("Workspace root `Cargo.toml` exists; qed") - ).expect("Workspace root `Cargo.toml` is a valid toml file; qed"); + &fs::read_to_string(workspace_root_path.join("Cargo.toml")) + .expect("Workspace root `Cargo.toml` exists; qed"), + ) + .expect("Workspace root `Cargo.toml` is a valid toml file; qed"); let mut wasm_workspace_toml = Table::new(); @@ -232,25 +240,25 @@ fn create_project_cargo_toml( wasm_workspace_toml.insert("profile".into(), profile.into()); // Add patch section from the project root `Cargo.toml` - while let Some(mut patch) = workspace_toml.remove("patch") - .and_then(|p| p.try_into::().ok()) { + while let Some(mut patch) = + workspace_toml.remove("patch").and_then(|p| p.try_into::
().ok()) + { // Iterate over all patches and make the patch path absolute from the workspace root path. - patch.iter_mut() - .filter_map(|p| + patch + .iter_mut() + .filter_map(|p| { p.1.as_table_mut().map(|t| t.iter_mut().filter_map(|t| t.1.as_table_mut())) - ) + }) .flatten() - .for_each(|p| - p.iter_mut() - .filter(|(k, _)| k == &"path") - .for_each(|(_, v)| { - if let Some(path) = v.as_str().map(PathBuf::from) { - if path.is_relative() { - *v = workspace_root_path.join(path).display().to_string().into(); - } + .for_each(|p| { + p.iter_mut().filter(|(k, _)| k == &"path").for_each(|(_, v)| { + if let Some(path) = v.as_str().map(PathBuf::from) { + if path.is_relative() { + *v = workspace_root_path.join(path).display().to_string().into(); } - }) - ); + } + }) + }); wasm_workspace_toml.insert("patch".into(), patch.into()); } @@ -296,7 +304,8 @@ fn find_package_by_manifest_path<'a>( manifest_path: &Path, crate_metadata: &'a cargo_metadata::Metadata, ) -> &'a cargo_metadata::Package { - crate_metadata.packages + crate_metadata + .packages .iter() .find(|p| p.manifest_path == manifest_path) .expect("Wasm project exists in its own metadata; qed") @@ -309,18 +318,19 @@ fn project_enabled_features( ) -> Vec { let package = find_package_by_manifest_path(cargo_manifest, crate_metadata); - let mut enabled_features = package.features.keys() + let mut enabled_features = package + .features + .keys() .filter(|f| { let mut feature_env = f.replace("-", "_"); feature_env.make_ascii_uppercase(); // We don't want to enable the `std`/`default` feature for the wasm build and // we need to check if the feature is enabled by checking the env variable. - *f != "std" - && *f != "default" - && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() + *f != "std" && + *f != "default" && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() }) .cloned() .collect::>(); @@ -418,7 +428,8 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); - build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) + build_cmd + .args(&["rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). 
@@ -456,17 +467,16 @@ fn compact_wasm_file( let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); - let wasm_file = project.join("target/wasm32-unknown-unknown") + let wasm_file = project + .join("target/wasm32-unknown-unknown") .join(target) .join(format!("{}.wasm", default_wasm_binary_name)); let wasm_compact_file = if is_release_build { - let wasm_compact_file = project.join( - format!( - "{}.compact.wasm", - wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), - ) - ); + let wasm_compact_file = project.join(format!( + "{}.compact.wasm", + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), + )); wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) .expect("Failed to compact generated WASM binary."); Some(WasmBinary(wasm_compact_file)) @@ -474,24 +484,19 @@ fn compact_wasm_file( None }; - let wasm_compact_compressed_file = wasm_compact_file.as_ref() - .and_then(|compact_binary| { - let file_name = wasm_binary_name.clone() - .unwrap_or_else(|| default_wasm_binary_name.clone()); - - let wasm_compact_compressed_file = project.join( - format!( - "{}.compact.compressed.wasm", - file_name, - ) - ); - - if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { - Some(WasmBinary(wasm_compact_compressed_file)) - } else { - None - } - }); + let wasm_compact_compressed_file = wasm_compact_file.as_ref().and_then(|compact_binary| { + let file_name = + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()); + + let wasm_compact_compressed_file = + project.join(format!("{}.compact.compressed.wasm", file_name,)); + + if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { + Some(WasmBinary(wasm_compact_compressed_file)) + } else { + None + } + }); let bloaty_file_name = if let Some(name) = wasm_binary_name { format!("{}.wasm", name) @@ -502,24 +507,14 @@ fn compact_wasm_file( let bloaty_file = project.join(bloaty_file_name); fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); - ( - wasm_compact_file, - wasm_compact_compressed_file, - WasmBinaryBloaty(bloaty_file), - ) + (wasm_compact_file, wasm_compact_compressed_file, WasmBinaryBloaty(bloaty_file)) } -fn compress_wasm( - wasm_binary_path: &Path, - compressed_binary_out_path: &Path, -) -> bool { +fn compress_wasm(wasm_binary_path: &Path, compressed_binary_out_path: &Path) -> bool { use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; let data = fs::read(wasm_binary_path).expect("Failed to read WASM binary"); - if let Some(compressed) = sp_maybe_compressed_blob::compress( - &data, - CODE_BLOB_BOMB_LIMIT, - ) { + if let Some(compressed) = sp_maybe_compressed_blob::compress(&data, CODE_BLOB_BOMB_LIMIT) { fs::write(compressed_binary_out_path, &compressed[..]) .expect("Failed to write WASM binary"); @@ -590,7 +585,8 @@ fn generate_rerun_if_changed_instructions( .exec() .expect("`cargo metadata` can not fail!"); - let package = metadata.packages + let package = metadata + .packages .iter() .find(|p| p.manifest_path == cargo_manifest) .expect("The crate package is contained in its own metadata; qed"); @@ -603,12 +599,11 @@ fn generate_rerun_if_changed_instructions( packages.insert(DeduplicatePackage::from(package)); while let Some(dependency) = dependencies.pop() { - let path_or_git_dep = dependency.source - .as_ref() - .map(|s| s.starts_with("git+")) - .unwrap_or(true); + let path_or_git_dep = + 
dependency.source.as_ref().map(|s| s.starts_with("git+")).unwrap_or(true); - let package = metadata.packages + let package = metadata + .packages .iter() .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) .find(|p| { @@ -649,9 +644,7 @@ fn package_rerun_if_changed(package: &DeduplicatePackage) { // Ignore this entry if it is a directory that contains a `Cargo.toml` that is not the // `Cargo.toml` related to the current package. This is done to ignore sub-crates of a crate. // If such a sub-crate is a dependency, it will be processed independently anyway. - p.path() == manifest_path - || !p.path().is_dir() - || !p.path().join("Cargo.toml").exists() + p.path() == manifest_path || !p.path().is_dir() || !p.path().join("Cargo.toml").exists() }) .filter_map(|p| p.ok().map(|p| p.into_path())) .filter(|p| { @@ -681,5 +674,6 @@ fn copy_wasm_to_target_directory(cargo_manifest: &Path, wasm_binary: &WasmBinary fs::copy( wasm_binary.wasm_binary_path(), target_dir.join(format!("{}.wasm", get_wasm_binary_name(cargo_manifest))), - ).expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); + ) + .expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); }
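// ---- Editorial aside (not part of the patch) ----
// The primitive behind `compress_wasm` above, round-tripped. `decompress`
// and its bomb-limit behavior are assumed from `sp_maybe_compressed_blob`
// as of this revision.
fn compression_roundtrip(wasm: &[u8]) {
	use sp_maybe_compressed_blob::{compress, decompress, CODE_BLOB_BOMB_LIMIT};

	// `compress` refuses inputs that would exceed the bomb limit.
	if let Some(blob) = compress(wasm, CODE_BLOB_BOMB_LIMIT) {
		let back = decompress(&blob, CODE_BLOB_BOMB_LIMIT).expect("own output decompresses");
		assert_eq!(&*back, wasm);
	}
}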

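// ---- Editorial aside (not part of the patch) ----
// The grandpa hunks that follow reformat a pallet-rename migration. Its core
// is a storage-prefix move; a sketch assuming `frame_support`'s `move_pallet`
// helper of this era, with `OLD_PREFIX` as the pallet's historical prefix.
fn rename_pallet_storage(new_pallet_name: &str) {
	// Re-keys everything under twox128(OLD_PREFIX) to the new prefix and
	// removes the old entries.
	frame_support::storage::migration::move_pallet(OLD_PREFIX, new_pallet_name.as_bytes());
}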
::storage_version(); log::info!( @@ -59,7 +56,7 @@ pub fn migrate< new_pallet_name.as_ref().as_bytes(), ); ::BlockWeights::get().max_block - } + }, _ => { log::warn!( target: "runtime::afg", @@ -75,11 +72,9 @@ pub fn migrate< /// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. /// /// Panics if anything goes wrong. -pub fn pre_migration< - T: frame_system::Config, - P: GetPalletVersion + 'static, - N: AsRef, ->(new: N) { +pub fn pre_migration>( + new: N, +) { let new = new.as_ref(); log::info!("pre-migration grandpa test with new = {}", new); @@ -119,10 +114,6 @@ pub fn post_migration() { log::info!("post-migration grandpa"); // Assert that nothing remains at the old prefix - assert!( - sp_io::storage::next_key(&twox_128(OLD_PREFIX)).map_or( - true, - |next_key| !next_key.starts_with(&twox_128(OLD_PREFIX)) - ) - ); + assert!(sp_io::storage::next_key(&twox_128(OLD_PREFIX)) + .map_or(true, |next_key| !next_key.starts_with(&twox_128(OLD_PREFIX)))); } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 768564c30105f66ee4bca5cb2daf9cee7b604923..882acdb4bcc127474a99e41acf29e814cdd6113c 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -19,13 +19,15 @@ #![cfg(test)] -use crate::{AuthorityId, AuthorityList, ConsensusLog, Config, self as pallet_grandpa}; +use crate::{self as pallet_grandpa, AuthorityId, AuthorityList, Config, ConsensusLog}; use ::grandpa as finality_grandpa; use codec::Encode; +use frame_election_provider_support::onchain; use frame_support::{ parameter_types, - traits::{KeyOwnerProofSystem, OnFinalize, OnInitialize, GenesisBuild}, + traits::{GenesisBuild, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; +use pallet_session::historical as pallet_session_historical; use pallet_staking::EraIndex; use sp_core::{crypto::KeyTypeId, H256}; use sp_finality_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; @@ -38,8 +40,6 @@ use sp_runtime::{ DigestItem, Perbill, }; use sp_staking::SessionIndex; -use pallet_session::historical as pallet_session_historical; -use frame_election_provider_support::onchain; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -275,13 +275,9 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let balances: Vec<_> = (0..authorities.len()) - .map(|i| (i as u64, 10_000_000)) - .collect(); + let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); pallet_balances::GenesisConfig:: { balances } .assimilate_storage(&mut t) @@ -295,9 +291,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx ( i as u64, i as u64, - TestSessionKeys { - grandpa_authority: AuthorityId::from(k.clone()), - }, + TestSessionKeys { grandpa_authority: AuthorityId::from(k.clone()) }, ) }) .collect(); @@ -311,12 +305,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx // controllers are the index + 1000 let stakers: Vec<_> = (0..authorities.len()) .map(|i| { - ( - i as u64, - i as u64 + 1000, - 10_000, - pallet_staking::StakerStatus::::Validator, - ) + (i as u64, i as u64 + 1000, 10_000, pallet_staking::StakerStatus::::Validator) }) 
.collect(); @@ -348,12 +337,7 @@ pub fn start_session(session_index: SessionIndex) { System::parent_hash() }; - System::initialize( - &(i as u64 + 1), - &parent_hash, - &Default::default(), - Default::default(), - ); + System::initialize(&(i as u64 + 1), &parent_hash, &Default::default(), Default::default()); System::set_block_number((i + 1).into()); Timestamp::set_timestamp(System::block_number() * 6000); @@ -372,12 +356,7 @@ pub fn start_era(era_index: EraIndex) { } pub fn initialize_block(number: u64, parent_hash: H256) { - System::initialize( - &number, - &parent_hash, - &Default::default(), - Default::default(), - ); + System::initialize(&number, &parent_hash, &Default::default(), Default::default()); } pub fn generate_equivocation_proof( @@ -386,10 +365,7 @@ pub fn generate_equivocation_proof( vote2: (RoundNumber, H256, u64, &Ed25519Keyring), ) -> sp_finality_grandpa::EquivocationProof { let signed_prevote = |round, hash, number, keyring: &Ed25519Keyring| { - let prevote = finality_grandpa::Prevote { - target_hash: hash, - target_number: number, - }; + let prevote = finality_grandpa::Prevote { target_hash: hash, target_number: number }; let prevote_msg = finality_grandpa::Message::Prevote(prevote.clone()); let payload = sp_finality_grandpa::localized_payload(round, set_id, &prevote_msg); diff --git a/substrate/frame/grandpa/src/tests.rs b/substrate/frame/grandpa/src/tests.rs index 0692102771bfbb117fe229ac4e090fd092636820..8337876d88bc0915e8516874530bad1499df5dae 100644 --- a/substrate/frame/grandpa/src/tests.rs +++ b/substrate/frame/grandpa/src/tests.rs @@ -24,7 +24,7 @@ use crate::mock::*; use codec::Encode; use fg_primitives::ScheduledChange; use frame_support::{ - assert_err, assert_ok, assert_noop, + assert_err, assert_noop, assert_ok, traits::{Currency, OnFinalize, OneSessionHandler}, weights::{GetDispatchInfo, Pays}, }; @@ -43,21 +43,24 @@ fn authorities_change_logged() { Grandpa::on_finalize(1); let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 0, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); - - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange(ScheduledChange { + delay: 0, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + })),], + } + ); + + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Finalization, event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), topics: vec![], - }, - ]); + },] + ); }); } @@ -68,13 +71,15 @@ fn authorities_change_logged_after_delay() { Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); Grandpa::on_finalize(1); let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 1, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange(ScheduledChange { + delay: 1, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + })),], + } + ); // no change at this height. 
assert_eq!(System::events(), vec![]); @@ -84,13 +89,14 @@ fn authorities_change_logged_after_delay() { Grandpa::on_finalize(2); let _header = System::finalize(); - assert_eq!(System::events(), vec![ - EventRecord { + assert_eq!( + System::events(), + vec![EventRecord { phase: Phase::Finalization, event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), topics: vec![], - }, - ]); + },] + ); }); } @@ -131,11 +137,7 @@ fn cannot_schedule_change_when_one_pending() { fn dispatch_forced_change() { new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { initialize_block(1, Default::default()); - Grandpa::schedule_change( - to_authorities(vec![(4, 1), (5, 1), (6, 1)]), - 5, - Some(0), - ).unwrap(); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap(); assert!(>::exists()); assert_noop!( @@ -168,7 +170,10 @@ fn dispatch_forced_change() { { initialize_block(7, header.hash()); assert!(!>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); + assert_eq!( + Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(7); header = System::finalize(); @@ -178,7 +183,10 @@ fn dispatch_forced_change() { { initialize_block(8, header.hash()); assert!(>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); + assert_eq!( + Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); assert_noop!( Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), Error::::ChangePending @@ -205,7 +213,11 @@ fn dispatch_forced_change() { { initialize_block(11, header.hash()); assert!(!>::exists()); - assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0))); + assert_ok!(Grandpa::schedule_change( + to_authorities(vec![(5, 1), (6, 1), (7, 1)]), + 5, + Some(0) + )); assert_eq!(Grandpa::next_forced(), Some(21)); Grandpa::on_finalize(11); header = System::finalize(); @@ -222,13 +234,7 @@ fn schedule_pause_only_when_live() { Grandpa::schedule_pause(1).unwrap(); // we've switched to the pending pause state - assert_eq!( - Grandpa::state(), - StoredState::PendingPause { - scheduled_at: 1u64, - delay: 1, - }, - ); + assert_eq!(Grandpa::state(), StoredState::PendingPause { scheduled_at: 1u64, delay: 1 },); Grandpa::on_finalize(1); let _ = System::finalize(); @@ -242,10 +248,7 @@ fn schedule_pause_only_when_live() { let _ = System::finalize(); // after finalizing block 2 the set should have switched to paused state - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); + assert_eq!(Grandpa::state(), StoredState::Paused,); }); } @@ -257,20 +260,14 @@ fn schedule_resume_only_when_paused() { // the set is currently live, resuming it is an error assert_noop!(Grandpa::schedule_resume(1), Error::::ResumeFailed); - assert_eq!( - Grandpa::state(), - StoredState::Live, - ); + assert_eq!(Grandpa::state(), StoredState::Live,); // we schedule a pause to be applied instantly Grandpa::schedule_pause(0).unwrap(); Grandpa::on_finalize(1); let _ = System::finalize(); - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); + assert_eq!(Grandpa::state(), StoredState::Paused,); // we schedule the set to go back live in 2 blocks initialize_block(2, Default::default()); @@ -287,10 +284,7 @@ fn schedule_resume_only_when_paused() { let _ = System::finalize(); // it should be live at block 
-		assert_eq!(
-			Grandpa::state(),
-			StoredState::Live,
-		);
+		assert_eq!(Grandpa::state(), StoredState::Live,);
 	});
 }
@@ -298,26 +292,11 @@ fn schedule_resume_only_when_paused() {
 fn time_slot_have_sane_ord() {
 	// Ensure that `Ord` implementation is sane.
 	const FIXTURE: &[GrandpaTimeSlot] = &[
-		GrandpaTimeSlot {
-			set_id: 0,
-			round: 0,
-		},
-		GrandpaTimeSlot {
-			set_id: 0,
-			round: 1,
-		},
-		GrandpaTimeSlot {
-			set_id: 1,
-			round: 0,
-		},
-		GrandpaTimeSlot {
-			set_id: 1,
-			round: 1,
-		},
-		GrandpaTimeSlot {
-			set_id: 1,
-			round: 2,
-		}
+		GrandpaTimeSlot { set_id: 0, round: 0 },
+		GrandpaTimeSlot { set_id: 0, round: 1 },
+		GrandpaTimeSlot { set_id: 1, round: 0 },
+		GrandpaTimeSlot { set_id: 1, round: 1 },
+		GrandpaTimeSlot { set_id: 1, round: 2 },
 	];
 	assert!(FIXTURE.windows(2).all(|f| f[0] < f[1]));
 }
@@ -325,16 +304,9 @@ fn time_slot_have_sane_ord() {
 /// Returns a list with 3 authorities with known keys:
 /// Alice, Bob and Charlie.
 pub fn test_authorities() -> AuthorityList {
-	let authorities = vec![
-		Ed25519Keyring::Alice,
-		Ed25519Keyring::Bob,
-		Ed25519Keyring::Charlie,
-	];
+	let authorities = vec![Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie];
-	authorities
-		.into_iter()
-		.map(|id| (id.public().into(), 1u64))
-		.collect()
+	authorities.into_iter().map(|id| (id.public().into(), 1u64)).collect()
 }
 #[test]
@@ -357,11 +329,7 @@ fn report_equivocation_current_set_works() {
 		assert_eq!(
 			Staking::eras_stakers(1, validator),
-			pallet_staking::Exposure {
-				total: 10_000,
-				own: 10_000,
-				others: vec![],
-			},
+			pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] },
 		);
 	}
@@ -384,13 +352,11 @@ fn report_equivocation_current_set_works() {
 			Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
 		// report the equivocation and the tx should be dispatched successfully
-		assert_ok!(
-			Grandpa::report_equivocation_unsigned(
-				Origin::none(),
-				equivocation_proof,
-				key_owner_proof,
-			),
-		);
+		assert_ok!(Grandpa::report_equivocation_unsigned(
+			Origin::none(),
+			equivocation_proof,
+			key_owner_proof,
+		),);
 		start_era(2);
@@ -401,17 +367,13 @@ fn report_equivocation_current_set_works() {
 		assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0);
 		assert_eq!(
 			Staking::eras_stakers(2, equivocation_validator_id),
-			pallet_staking::Exposure {
-				total: 0,
-				own: 0,
-				others: vec![],
-			},
+			pallet_staking::Exposure { total: 0, own: 0, others: vec![] },
 		);
 		// check that the balances of all other validators are left intact.
 		for validator in &validators {
 			if *validator == equivocation_validator_id {
-				continue;
+				continue
 			}
 			assert_eq!(Balances::total_balance(validator), 10_000_000);
@@ -419,11 +381,7 @@ fn report_equivocation_current_set_works() {
 			assert_eq!(
 				Staking::eras_stakers(2, validator),
-				pallet_staking::Exposure {
-					total: 10_000,
-					own: 10_000,
-					others: vec![],
-				},
+				pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] },
 			);
 		}
 	});
@@ -455,11 +413,7 @@ fn report_equivocation_old_set_works() {
 			assert_eq!(
 				Staking::eras_stakers(2, validator),
-				pallet_staking::Exposure {
-					total: 10_000,
-					own: 10_000,
-					others: vec![],
-				},
+				pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] },
 			);
 		}
@@ -476,13 +430,11 @@ fn report_equivocation_old_set_works() {
 		// report the equivocation using the key ownership proof generated on
 		// the old set, the tx should be dispatched successfully
-		assert_ok!(
-			Grandpa::report_equivocation_unsigned(
-				Origin::none(),
-				equivocation_proof,
-				key_owner_proof,
-			),
-		);
+		assert_ok!(Grandpa::report_equivocation_unsigned(
+			Origin::none(),
+			equivocation_proof,
+			key_owner_proof,
+		),);
 		start_era(3);
@@ -494,17 +446,13 @@ fn report_equivocation_old_set_works() {
 		assert_eq!(
 			Staking::eras_stakers(3, equivocation_validator_id),
-			pallet_staking::Exposure {
-				total: 0,
-				own: 0,
-				others: vec![],
-			},
+			pallet_staking::Exposure { total: 0, own: 0, others: vec![] },
 		);
 		// check that the balances of all other validators are left intact.
 		for validator in &validators {
 			if *validator == equivocation_validator_id {
-				continue;
+				continue
 			}
 			assert_eq!(Balances::total_balance(validator), 10_000_000);
@@ -512,11 +460,7 @@ fn report_equivocation_old_set_works() {
 			assert_eq!(
 				Staking::eras_stakers(3, validator),
-				pallet_staking::Exposure {
-					total: 10_000,
-					own: 10_000,
-					others: vec![],
-				},
+				pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] },
 			);
 		}
 	});
@@ -737,10 +681,8 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 		let key_owner_proof =
 			Historical::prove((sp_finality_grandpa::KEY_TYPE, &equivocation_key)).unwrap();
-		let call = Call::report_equivocation_unsigned(
-			equivocation_proof.clone(),
-			key_owner_proof.clone(),
-		);
+		let call =
+			Call::report_equivocation_unsigned(equivocation_proof.clone(), key_owner_proof.clone());
 		// only local/inblock reports are allowed
 		assert_eq!(
@@ -752,11 +694,7 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() {
 		);
 		// the transaction is valid when passed as local
-		let tx_tag = (
-			equivocation_key,
-			set_id,
-			1u64,
-		);
+		let tx_tag = (equivocation_key, set_id, 1u64);
 		assert_eq!(
 			<Grandpa as sp_runtime::traits::ValidateUnsigned>::validate_unsigned(
@@ -861,23 +799,19 @@ fn always_schedules_a_change_on_new_session_when_stalled() {
 fn report_equivocation_has_valid_weight() {
 	// the weight depends on the size of the validator set,
 	// but there's a lower bound of 100 validators.
-	assert!(
-		(1..=100)
-			.map(<Test as Config>::WeightInfo::report_equivocation)
-			.collect::<Vec<_>>()
-			.windows(2)
-			.all(|w| w[0] == w[1])
-	);
+	assert!((1..=100)
+		.map(<Test as Config>::WeightInfo::report_equivocation)
+		.collect::<Vec<_>>()
+		.windows(2)
+		.all(|w| w[0] == w[1]));
 	// after 100 validators the weight should keep increasing
 	// with every extra validator.
-	assert!(
-		(100..=1000)
-			.map(<Test as Config>::WeightInfo::report_equivocation)
-			.collect::<Vec<_>>()
-			.windows(2)
-			.all(|w| w[0] < w[1])
-	);
+	assert!((100..=1000)
+		.map(<Test as Config>::WeightInfo::report_equivocation)
+		.collect::<Vec<_>>()
+		.windows(2)
+		.all(|w| w[0] < w[1]));
 }
 #[test]
diff --git a/substrate/frame/identity/src/benchmarking.rs b/substrate/frame/identity/src/benchmarking.rs
index 4fb76fcb4138c02fbb786c32be3ce74af04bc822..5cae65818145d61580151ebae74ca9ccc9a49b39 100644
--- a/substrate/frame/identity/src/benchmarking.rs
+++ b/substrate/frame/identity/src/benchmarking.rs
@@ -21,11 +21,11 @@
 use super::*;
+use crate::Pallet as Identity;
+use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller};
+use frame_support::{ensure, traits::Get};
 use frame_system::RawOrigin;
-use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite};
 use sp_runtime::traits::Bounded;
-use frame_support::{ensure, traits::Get};
-use crate::Pallet as Identity;
 const SEED: u32 = 0;
@@ -39,11 +39,19 @@ fn add_registrars<T: Config>(r: u32) -> Result<(), &'static str> {
 		let registrar: T::AccountId = account("registrar", i, SEED);
 		let _ = T::Currency::make_free_balance_be(&registrar, BalanceOf::<T>::max_value());
 		Identity::<T>::add_registrar(RawOrigin::Root.into(), registrar.clone())?;
-		Identity::<T>::set_fee(RawOrigin::Signed(registrar.clone()).into(), i.into(), 10u32.into())?;
-		let fields = IdentityFields(
-			IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot
-			| IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter
-		);
+		Identity::<T>::set_fee(
+			RawOrigin::Signed(registrar.clone()).into(),
+			i.into(),
+			10u32.into(),
+		)?;
+		let fields =
+			IdentityFields(
+				IdentityField::Display |
+					IdentityField::Legal | IdentityField::Web |
+					IdentityField::Riot | IdentityField::Email |
+					IdentityField::PgpFingerprint |
+					IdentityField::Image | IdentityField::Twitter,
+			);
 		Identity::<T>::set_fields(RawOrigin::Signed(registrar.clone()).into(), i.into(), fields)?;
 	}
@@ -53,7 +61,10 @@ fn add_registrars<T: Config>(r: u32) -> Result<(), &'static str> {
 // Create `s` sub-accounts for the identity of `who` and return them.
 // Each will have 32 bytes of raw data added to it.
-fn create_sub_accounts<T: Config>(who: &T::AccountId, s: u32) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
+fn create_sub_accounts<T: Config>(
+	who: &T::AccountId,
+	s: u32,
+) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
 	let mut subs = Vec::new();
 	let who_origin = RawOrigin::Signed(who.clone());
 	let data = Data::Raw(vec![0; 32].try_into().unwrap());
@@ -73,7 +84,10 @@ fn create_sub_accounts<T: Config>(who: &T::AccountId, s: u32) -> Result<Vec<(T
 // Adds `s` sub-accounts to the identity of `who`. Each will have 32 bytes of raw data added to
 // it. Returns the sub-accounts.
-fn add_sub_accounts<T: Config>(who: &T::AccountId, s: u32) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
+fn add_sub_accounts<T: Config>(
+	who: &T::AccountId,
+	s: u32,
+) -> Result<Vec<(T::AccountId, Data)>, &'static str> {
 	let who_origin = RawOrigin::Signed(who.clone());
 	let subs = create_sub_accounts::<T>(who, s)?;
@@ -399,8 +413,4 @@ benchmarks! {
 }
-impl_benchmark_test_suite!(
-	Identity,
-	crate::tests::new_test_ext(),
-	crate::tests::Test,
-);
+impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test,);
diff --git a/substrate/frame/identity/src/lib.rs b/substrate/frame/identity/src/lib.rs
index f6e3f0639f16a1bf59f676a6cc82e51d8a8fdb21..7b401d95573f620e5b9c6477651dc620a04d4d3a 100644
--- a/substrate/frame/identity/src/lib.rs
+++ b/substrate/frame/identity/src/lib.rs
@@ -72,32 +72,34 @@
 #![cfg_attr(not(feature = "std"), no_std)]
+mod benchmarking;
 #[cfg(test)]
 mod tests;
 mod types;
-mod benchmarking;
 pub mod weights;
-use sp_std::prelude::*;
-use sp_std::convert::TryInto;
-use sp_runtime::traits::{StaticLookup, Zero, AppendZerosInput, Saturating};
 use frame_support::traits::{BalanceStatus, Currency, OnUnbalanced, ReservableCurrency};
+use sp_runtime::traits::{AppendZerosInput, Saturating, StaticLookup, Zero};
+use sp_std::{convert::TryInto, prelude::*};
 pub use weights::WeightInfo;
 pub use pallet::*;
 pub use types::{
-	Data, IdentityField, IdentityFields, IdentityInfo, Judgement, RegistrarIndex,
-	RegistrarInfo, Registration,
+	Data, IdentityField, IdentityFields, IdentityInfo, Judgement, RegistrarIndex, RegistrarInfo,
+	Registration,
 };
-type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
-type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance;
+type BalanceOf<T> =
+	<<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
+	<T as frame_system::Config>::AccountId,
+>>::NegativeImbalance;
 #[frame_support::pallet]
 pub mod pallet {
+	use super::*;
 	use frame_support::pallet_prelude::*;
 	use frame_system::pallet_prelude::*;
-	use super::*;
 	#[pallet::config]
 	pub trait Config: frame_system::Config {
@@ -121,7 +123,6 @@ pub mod pallet {
 		#[pallet::constant]
 		type SubAccountDeposit: Get<BalanceOf<Self>>;
-		/// The maximum number of sub-accounts allowed per identified account.
 		#[pallet::constant]
 		type MaxSubAccounts: Get<u32>;
@@ -171,13 +172,8 @@ pub mod pallet {
 	/// context. If the account is not some other account's sub-identity, then just `None`.
 	#[pallet::storage]
 	#[pallet::getter(fn super_of)]
-	pub(super) type SuperOf<T: Config> = StorageMap<
-		_,
-		Blake2_128Concat,
-		T::AccountId,
-		(T::AccountId, Data),
-		OptionQuery,
-	>;
+	pub(super) type SuperOf<T: Config> =
+		StorageMap<_, Blake2_128Concat, T::AccountId, (T::AccountId, Data), OptionQuery>;
 	/// Alternative "sub" identities of this account.
 	///
@@ -239,7 +235,7 @@ pub mod pallet {
 		/// Sender is not a sub-account.
 		NotSub,
 		/// Sub-account isn't owned by sender.
-		NotOwned
+		NotOwned,
 	}
 	#[pallet::event]
@@ -290,17 +286,23 @@ pub mod pallet {
 		/// - One event.
/// # #[pallet::weight(T::WeightInfo::add_registrar(T::MaxRegistrars::get()))] - pub fn add_registrar(origin: OriginFor, account: T::AccountId) -> DispatchResultWithPostInfo { + pub fn add_registrar( + origin: OriginFor, + account: T::AccountId, + ) -> DispatchResultWithPostInfo { T::RegistrarOrigin::ensure_origin(origin)?; let (i, registrar_count) = >::try_mutate( |registrars| -> Result<(RegistrarIndex, usize), DispatchError> { - registrars.try_push(Some(RegistrarInfo { - account, fee: Zero::zero(), fields: Default::default() - })) - .map_err(|_| Error::::TooManyRegistrars)?; + registrars + .try_push(Some(RegistrarInfo { + account, + fee: Zero::zero(), + fields: Default::default(), + })) + .map_err(|_| Error::::TooManyRegistrars)?; Ok(((registrars.len() - 1) as RegistrarIndex, registrars.len())) - } + }, )?; Self::deposit_event(Event::RegistrarAdded(i)); @@ -331,7 +333,10 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub fn set_identity(origin: OriginFor, info: IdentityInfo) -> DispatchResultWithPostInfo { + pub fn set_identity( + origin: OriginFor, + info: IdentityInfo, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let extra_fields = info.additional.len() as u32; ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); @@ -343,8 +348,9 @@ pub mod pallet { id.judgements.retain(|j| j.1.is_sticky()); id.info = info; id - } - None => Registration { info, judgements: BoundedVec::default(), deposit: Zero::zero() }, + }, + None => + Registration { info, judgements: BoundedVec::default(), deposit: Zero::zero() }, }; let old_deposit = id.deposit; @@ -363,8 +369,9 @@ pub mod pallet { Ok(Some(T::WeightInfo::set_identity( judgements as u32, // R - extra_fields // X - )).into()) + extra_fields, // X + )) + .into()) } /// Set the sub-accounts of the sender. @@ -397,15 +404,22 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::set_subs_old(T::MaxSubAccounts::get()) // P: Assume max sub accounts removed. .saturating_add(T::WeightInfo::set_subs_new(subs.len() as u32)) // S: Assume all subs are new. )] - pub fn set_subs(origin: OriginFor, subs: Vec<(T::AccountId, Data)>) -> DispatchResultWithPostInfo { + pub fn set_subs( + origin: OriginFor, + subs: Vec<(T::AccountId, Data)>, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; ensure!(>::contains_key(&sender), Error::::NotFound); - ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); + ensure!( + subs.len() <= T::MaxSubAccounts::get() as usize, + Error::::TooManySubAccounts + ); let (old_deposit, old_ids) = >::get(&sender); let new_deposit = T::SubAccountDeposit::get() * >::from(subs.len() as u32); - let not_other_sub = subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| &i.0 == &sender); + let not_other_sub = + subs.iter().filter_map(|i| SuperOf::::get(&i.0)).all(|i| &i.0 == &sender); ensure!(not_other_sub, Error::::AlreadyClaimed); if old_deposit < new_deposit { @@ -434,8 +448,9 @@ pub mod pallet { Ok(Some( T::WeightInfo::set_subs_old(old_ids.len() as u32) // P: Real number of old accounts removed. - .saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)) // S: New subs added. - ).into()) + .saturating_add(T::WeightInfo::set_subs_new(new_subs as u32)), /* S: New subs added. */ + ) + .into()) } /// Clear an account's identity info and all sub-accounts and return all deposits. 
@@ -477,10 +492,11 @@ pub mod pallet { Self::deposit_event(Event::IdentityCleared(sender, deposit)); Ok(Some(T::WeightInfo::clear_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32 // X - )).into()) + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32, // X + )) + .into()) } /// Request a judgement from a registrar. @@ -510,28 +526,30 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub fn request_judgement(origin: OriginFor, + pub fn request_judgement( + origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, #[pallet::compact] max_fee: BalanceOf, ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let registrars = >::get(); - let registrar = registrars.get(reg_index as usize).and_then(Option::as_ref) + let registrar = registrars + .get(reg_index as usize) + .and_then(Option::as_ref) .ok_or(Error::::EmptyIndex)?; ensure!(max_fee >= registrar.fee, Error::::FeeChanged); let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; let item = (reg_index, Judgement::FeePaid(registrar.fee)); match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(i) => if id.judgements[i].1.is_sticky() { - Err(Error::::StickyJudgement)? - } else { - id.judgements[i] = item - }, - Err(i) => id - .judgements - .try_insert(i, item) - .map_err(|_| Error::::TooManyRegistrars)?, + Ok(i) => + if id.judgements[i].1.is_sticky() { + Err(Error::::StickyJudgement)? + } else { + id.judgements[i] = item + }, + Err(i) => + id.judgements.try_insert(i, item).map_err(|_| Error::::TooManyRegistrars)?, } T::Currency::reserve(&sender, registrar.fee)?; @@ -542,10 +560,8 @@ pub mod pallet { Self::deposit_event(Event::JudgementRequested(sender, reg_index)); - Ok(Some(T::WeightInfo::request_judgement( - judgements as u32, - extra_fields as u32, - )).into()) + Ok(Some(T::WeightInfo::request_judgement(judgements as u32, extra_fields as u32)) + .into()) } /// Cancel a previous request. @@ -569,11 +585,16 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub fn cancel_request(origin: OriginFor, reg_index: RegistrarIndex) -> DispatchResultWithPostInfo { + pub fn cancel_request( + origin: OriginFor, + reg_index: RegistrarIndex, + ) -> DispatchResultWithPostInfo { let sender = ensure_signed(origin)?; let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; - let pos = id.judgements.binary_search_by_key(®_index, |x| x.0) + let pos = id + .judgements + .binary_search_by_key(®_index, |x| x.0) .map_err(|_| Error::::NotFound)?; let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { fee @@ -589,10 +610,7 @@ pub mod pallet { Self::deposit_event(Event::JudgementUnrequested(sender, reg_index)); - Ok(Some(T::WeightInfo::cancel_request( - judgements as u32, - extra_fields as u32 - )).into()) + Ok(Some(T::WeightInfo::cancel_request(judgements as u32, extra_fields as u32)).into()) } /// Set the fee required for a judgement to be requested from a registrar. 
@@ -609,7 +627,8 @@ pub mod pallet { /// - Benchmark: 7.315 + R * 0.329 µs (min squares analysis) /// # #[pallet::weight(T::WeightInfo::set_fee(T::MaxRegistrars::get()))] // R - pub fn set_fee(origin: OriginFor, + pub fn set_fee( + origin: OriginFor, #[pallet::compact] index: RegistrarIndex, #[pallet::compact] fee: BalanceOf, ) -> DispatchResultWithPostInfo { @@ -618,7 +637,14 @@ pub mod pallet { let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fee = fee; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.fee = fee; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; @@ -639,7 +665,8 @@ pub mod pallet { /// - Benchmark: 8.823 + R * 0.32 µs (min squares analysis) /// # #[pallet::weight(T::WeightInfo::set_account_id(T::MaxRegistrars::get()))] // R - pub fn set_account_id(origin: OriginFor, + pub fn set_account_id( + origin: OriginFor, #[pallet::compact] index: RegistrarIndex, new: T::AccountId, ) -> DispatchResultWithPostInfo { @@ -648,7 +675,14 @@ pub mod pallet { let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.account = new; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.account = new; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; @@ -669,7 +703,8 @@ pub mod pallet { /// - Benchmark: 7.464 + R * 0.325 µs (min squares analysis) /// # #[pallet::weight(T::WeightInfo::set_fields(T::MaxRegistrars::get()))] // R - pub fn set_fields(origin: OriginFor, + pub fn set_fields( + origin: OriginFor, #[pallet::compact] index: RegistrarIndex, fields: IdentityFields, ) -> DispatchResultWithPostInfo { @@ -678,13 +713,21 @@ pub mod pallet { let registrars = >::mutate(|rs| -> Result { rs.get_mut(index as usize) .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fields = fields; Some(()) } else { None }) + .and_then(|r| { + if r.account == who { + r.fields = fields; + Some(()) + } else { + None + } + }) .ok_or_else(|| DispatchError::from(Error::::InvalidIndex))?; Ok(rs.len()) })?; Ok(Some(T::WeightInfo::set_fields( - registrars as u32 // R - )).into()) + registrars as u32, // R + )) + .into()) } /// Provide a judgement for an account's identity. 
@@ -710,7 +753,8 @@ pub mod pallet { T::MaxRegistrars::get().into(), // R T::MaxAdditionalFields::get().into(), // X ))] - pub fn provide_judgement(origin: OriginFor, + pub fn provide_judgement( + origin: OriginFor, #[pallet::compact] reg_index: RegistrarIndex, target: ::Source, judgement: Judgement>, @@ -729,10 +773,15 @@ pub mod pallet { match id.judgements.binary_search_by_key(®_index, |x| x.0) { Ok(position) => { if let Judgement::FeePaid(fee) = id.judgements[position].1 { - let _ = T::Currency::repatriate_reserved(&target, &sender, fee, BalanceStatus::Free); + let _ = T::Currency::repatriate_reserved( + &target, + &sender, + fee, + BalanceStatus::Free, + ); } id.judgements[position] = item - } + }, Err(position) => id .judgements .try_insert(position, item) @@ -744,10 +793,8 @@ pub mod pallet { >::insert(&target, id); Self::deposit_event(Event::JudgementGiven(target, reg_index)); - Ok(Some(T::WeightInfo::provide_judgement( - judgements as u32, - extra_fields as u32, - )).into()) + Ok(Some(T::WeightInfo::provide_judgement(judgements as u32, extra_fields as u32)) + .into()) } /// Remove an account's identity and sub-account information and slash the deposits. @@ -775,7 +822,8 @@ pub mod pallet { T::MaxAdditionalFields::get().into(), // X ))] pub fn kill_identity( - origin: OriginFor, target: ::Source + origin: OriginFor, + target: ::Source, ) -> DispatchResultWithPostInfo { T::ForceOrigin::ensure_origin(origin)?; @@ -794,10 +842,11 @@ pub mod pallet { Self::deposit_event(Event::IdentityKilled(target, deposit)); Ok(Some(T::WeightInfo::kill_identity( - id.judgements.len() as u32, // R - sub_ids.len() as u32, // S - id.info.additional.len() as u32 // X - )).into()) + id.judgements.len() as u32, // R + sub_ids.len() as u32, // S + id.info.additional.len() as u32, // X + )) + .into()) } /// Add the given account to the sender's subs. @@ -808,7 +857,11 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. #[pallet::weight(T::WeightInfo::add_sub(T::MaxSubAccounts::get()))] - pub fn add_sub(origin: OriginFor, sub: ::Source, data: Data) -> DispatchResult { + pub fn add_sub( + origin: OriginFor, + sub: ::Source, + data: Data, + ) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); @@ -818,7 +871,10 @@ pub mod pallet { SubsOf::::try_mutate(&sender, |(ref mut subs_deposit, ref mut sub_ids)| { // Ensure there is space and that the deposit is paid. - ensure!(sub_ids.len() < T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); + ensure!( + sub_ids.len() < T::MaxSubAccounts::get() as usize, + Error::::TooManySubAccounts + ); let deposit = T::SubAccountDeposit::get(); T::Currency::reserve(&sender, deposit)?; @@ -837,7 +893,9 @@ pub mod pallet { /// sub identity of `sub`. #[pallet::weight(T::WeightInfo::rename_sub(T::MaxSubAccounts::get()))] pub fn rename_sub( - origin: OriginFor, sub: ::Source, data: Data + origin: OriginFor, + sub: ::Source, + data: Data, ) -> DispatchResult { let sender = ensure_signed(origin)?; let sub = T::Lookup::lookup(sub)?; @@ -855,7 +913,10 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_ and the sender must have a registered /// sub identity of `sub`. 
#[pallet::weight(T::WeightInfo::remove_sub(T::MaxSubAccounts::get()))] - pub fn remove_sub(origin: OriginFor, sub: ::Source) -> DispatchResult { + pub fn remove_sub( + origin: OriginFor, + sub: ::Source, + ) -> DispatchResult { let sender = ensure_signed(origin)?; ensure!(IdentityOf::::contains_key(&sender), Error::::NoIdentity); let sub = T::Lookup::lookup(sub)?; @@ -891,19 +952,20 @@ pub mod pallet { sub_ids.retain(|x| x != &sender); let deposit = T::SubAccountDeposit::get().min(*subs_deposit); *subs_deposit -= deposit; - let _ = T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free); + let _ = + T::Currency::repatriate_reserved(&sup, &sender, deposit, BalanceStatus::Free); Self::deposit_event(Event::SubIdentityRevoked(sender, sup.clone(), deposit)); }); Ok(()) } } - } impl Pallet { /// Get the subs of an account. pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { - SubsOf::::get(who).1 + SubsOf::::get(who) + .1 .into_iter() .filter_map(|a| SuperOf::::get(&a).map(|x| (a, x.1))) .collect() diff --git a/substrate/frame/identity/src/tests.rs b/substrate/frame/identity/src/tests.rs index 7a8bb4fa6d92e3a0a1303b9ebb098ec7933db3a7..127b0a9ecb1713d0e38a41447d353ac815c4e8f6 100644 --- a/substrate/frame/identity/src/tests.rs +++ b/substrate/frame/identity/src/tests.rs @@ -20,13 +20,13 @@ use super::*; use crate as pallet_identity; -use codec::{Encode, Decode}; -use sp_runtime::traits::BadOrigin; -use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types, BoundedVec}; +use codec::{Decode, Encode}; +use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types, BoundedVec}; +use frame_system::{EnsureOneOf, EnsureRoot, EnsureSignedBy}; use sp_core::H256; -use frame_system::{EnsureSignedBy, EnsureOneOf, EnsureRoot}; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup}, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -100,16 +100,8 @@ ord_parameter_types! { pub const One: u64 = 1; pub const Two: u64 = 2; } -type EnsureOneOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy ->; -type EnsureTwoOrRoot = EnsureOneOf< - u64, - EnsureRoot, - EnsureSignedBy ->; +type EnsureOneOrRoot = EnsureOneOf, EnsureSignedBy>; +type EnsureTwoOrRoot = EnsureOneOf, EnsureSignedBy>; impl pallet_identity::Config for Test { type Event = Event; type Currency = Balances; @@ -128,15 +120,10 @@ impl pallet_identity::Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - (3, 10), - (10, 100), - (20, 100), - (30, 100), - ], - }.assimilate_storage(&mut t).unwrap(); + balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } @@ -144,7 +131,7 @@ fn ten() -> IdentityInfo { IdentityInfo { display: Data::Raw(b"ten".to_vec().try_into().unwrap()), legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec().try_into().unwrap()), - .. Default::default() + ..Default::default() } } @@ -152,7 +139,7 @@ fn twenty() -> IdentityInfo { IdentityInfo { display: Data::Raw(b"twenty".to_vec().try_into().unwrap()), legal: Data::Raw(b"The Right Ordinal Twenty, Esq.".to_vec().try_into().unwrap()), - .. 
 Default::default()
+		..Default::default()
 	}
 }
@@ -177,7 +164,10 @@ fn editing_subaccounts_should_work() {
 		assert_eq!(Balances::free_balance(10), 70);
 		// third sub account is too many
-		assert_noop!(Identity::add_sub(Origin::signed(10), 3, data(3)), Error::<Test>::TooManySubAccounts);
+		assert_noop!(
+			Identity::add_sub(Origin::signed(10), 3, data(3)),
+			Error::<Test>::TooManySubAccounts
+		);
 		// rename first sub account
 		assert_ok!(Identity::rename_sub(Origin::signed(10), 1, data(11)));
@@ -214,7 +204,10 @@ fn resolving_subaccount_ownership_works() {
 		assert_eq!(Balances::free_balance(10), 80);
 		assert_eq!(Balances::reserved_balance(10), 20);
 		// 20 cannot claim 1 now
-		assert_noop!(Identity::add_sub(Origin::signed(20), 1, data(1)), Error::<Test>::AlreadyClaimed);
+		assert_noop!(
+			Identity::add_sub(Origin::signed(20), 1, data(1)),
+			Error::<Test>::AlreadyClaimed
+		);
 		// 1 wants to be with 20 so it quits from 10
 		assert_ok!(Identity::quit_sub(Origin::signed(1)));
 		// 1 gets the 10 that 10 paid.
@@ -243,9 +236,10 @@ fn adding_registrar_should_work() {
 		assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10));
 		let fields = IdentityFields(IdentityField::Display | IdentityField::Legal);
 		assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields));
-		assert_eq!(Identity::registrars(), vec![
-			Some(RegistrarInfo { account: 3, fee: 10, fields })
-		]);
+		assert_eq!(
+			Identity::registrars(),
+			vec![Some(RegistrarInfo { account: 3, fee: 10, fields })]
+		);
 	});
 }
@@ -370,7 +364,10 @@ fn setting_subaccounts_should_work() {
 		assert_eq!(Identity::super_of(40), None);
 		subs.push((20, Data::Raw(vec![40; 1].try_into().unwrap())));
-		assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::<Test>::TooManySubAccounts);
+		assert_noop!(
+			Identity::set_subs(Origin::signed(10), subs.clone()),
+			Error::<Test>::TooManySubAccounts
+		);
 	});
 }
@@ -378,7 +375,10 @@ fn clearing_account_should_remove_subaccounts_and_refund() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Identity::set_identity(Origin::signed(10), ten()));
-		assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]));
+		assert_ok!(Identity::set_subs(
+			Origin::signed(10),
+			vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]
+		));
 		assert_ok!(Identity::clear_identity(Origin::signed(10)));
 		assert_eq!(Balances::free_balance(10), 100);
 		assert!(Identity::super_of(20).is_none());
@@ -389,7 +389,10 @@ fn killing_account_should_remove_subaccounts_and_not_refund() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Identity::set_identity(Origin::signed(10), ten()));
-		assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]));
+		assert_ok!(Identity::set_subs(
+			Origin::signed(10),
+			vec![(20, Data::Raw(vec![40; 1].try_into().unwrap()))]
+		));
 		assert_ok!(Identity::kill_identity(Origin::signed(2), 10));
 		assert_eq!(Balances::free_balance(10), 80);
 		assert!(Identity::super_of(20).is_none());
@@ -409,7 +412,10 @@ fn cancelling_requested_judgement_should_work() {
 		assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::<Test>::NotFound);
 		assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable));
-		assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::<Test>::JudgementGiven);
+		assert_noop!(
+			Identity::cancel_request(Origin::signed(10), 0),
+			Error::<Test>::JudgementGiven
+		);
 	});
 }
@@ -419,19 +425,28 @@ fn requesting_judgement_should_work() {
 		assert_ok!(Identity::add_registrar(Origin::signed(1), 3));
 		assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10));
 		assert_ok!(Identity::set_identity(Origin::signed(10), ten()));
-		assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 9), Error::<Test>::FeeChanged);
+		assert_noop!(
+			Identity::request_judgement(Origin::signed(10), 0, 9),
+			Error::<Test>::FeeChanged
+		);
 		assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10));
 		// 10 for the judgement request, 10 for the identity.
 		assert_eq!(Balances::free_balance(10), 80);
 		// Re-requesting won't work as we already paid.
-		assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::<Test>::StickyJudgement);
+		assert_noop!(
+			Identity::request_judgement(Origin::signed(10), 0, 10),
+			Error::<Test>::StickyJudgement
+		);
 		assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous));
 		// Registrar got their payment now.
 		assert_eq!(Balances::free_balance(3), 20);
 		// Re-requesting still won't work as it's erroneous.
-		assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::<Test>::StickyJudgement);
+		assert_noop!(
+			Identity::request_judgement(Origin::signed(10), 0, 10),
+			Error::<Test>::StickyJudgement
+		);
 		// Requesting from a second registrar still works.
 		assert_ok!(Identity::add_registrar(Origin::signed(1), 4));
@@ -448,14 +463,24 @@ fn field_deposit_should_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Identity::add_registrar(Origin::signed(1), 3));
 		assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10));
-		assert_ok!(Identity::set_identity(Origin::signed(10), IdentityInfo {
-			additional: vec![
-				(Data::Raw(b"number".to_vec().try_into().unwrap()), Data::Raw(10u32.encode().try_into().unwrap())),
-				(Data::Raw(b"text".to_vec().try_into().unwrap()), Data::Raw(b"10".to_vec().try_into().unwrap())),
-			]
-			.try_into()
-			.unwrap(), .. Default::default()
-		}));
+		assert_ok!(Identity::set_identity(
+			Origin::signed(10),
+			IdentityInfo {
+				additional: vec![
+					(
+						Data::Raw(b"number".to_vec().try_into().unwrap()),
+						Data::Raw(10u32.encode().try_into().unwrap())
+					),
+					(
+						Data::Raw(b"text".to_vec().try_into().unwrap()),
+						Data::Raw(b"10".to_vec().try_into().unwrap())
+					),
+				]
+				.try_into()
+				.unwrap(),
+				..Default::default()
+			}
+		));
 		assert_eq!(Balances::free_balance(10), 70);
 	});
 }
@@ -465,7 +490,10 @@ fn setting_account_id_should_work() {
 	new_test_ext().execute_with(|| {
 		assert_ok!(Identity::add_registrar(Origin::signed(1), 3));
 		// account 4 cannot change the first registrar's identity since it's owned by 3.
-		assert_noop!(Identity::set_account_id(Origin::signed(4), 0, 3), Error::<Test>::InvalidIndex);
+		assert_noop!(
+			Identity::set_account_id(Origin::signed(4), 0, 3),
+			Error::<Test>::InvalidIndex
+		);
 		// account 3 can, because that's the registrar's current account.
 		assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4));
 		// account 4 can now, because that's their new ID.
diff --git a/substrate/frame/identity/src/types.rs b/substrate/frame/identity/src/types.rs
index 59781aadbd31e61cec4b37a9c03689118694a288..38bd6458a4881b8526f9822cb017e6cd63fb526e 100644
--- a/substrate/frame/identity/src/types.rs
+++ b/substrate/frame/identity/src/types.rs
@@ -15,19 +15,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-use codec::{Encode, Decode, MaxEncodedLen}; +use super::*; +use codec::{Decode, Encode, MaxEncodedLen}; use enumflags2::BitFlags; use frame_support::{ - traits::{ConstU32, Get}, - BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, -}; -use sp_std::prelude::*; -use sp_std::{fmt::Debug, iter::once, ops::Add}; -use sp_runtime::{ - traits::Zero, - RuntimeDebug, + traits::{ConstU32, Get}, + BoundedVec, CloneNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; -use super::*; +use sp_runtime::{traits::Zero, RuntimeDebug}; +use sp_std::{fmt::Debug, iter::once, ops::Add, prelude::*}; /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater /// than 32-bytes then it will be truncated when encoding. @@ -58,13 +54,13 @@ impl Decode for Data { let b = input.read_byte()?; Ok(match b { 0 => Data::None, - n @ 1 ..= 33 => { + n @ 1..=33 => { let mut r: BoundedVec<_, _> = vec![0u8; n as usize - 1] .try_into() .expect("bound checked in match arm condition; qed"); input.read(&mut r[..])?; Data::Raw(r) - } + }, 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), 35 => Data::Sha256(<[u8; 32]>::decode(input)?), 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), @@ -83,7 +79,7 @@ impl Encode for Data { let mut r = vec![l as u8 + 1; l + 1]; r[1..].copy_from_slice(&x[..l as usize]); r - } + }, Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), @@ -107,9 +103,8 @@ pub type RegistrarIndex = u32; /// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear /// which fields their attestation is relevant for by off-chain means. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)] -pub enum Judgement< - Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq -> { +pub enum Judgement +{ /// The default value; no opinion is held. Unknown, /// No judgement is yet in place, but a deposit is reserved as payment for providing one. @@ -131,9 +126,9 @@ pub enum Judgement< Erroneous, } -impl< - Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq -> Judgement { +impl + Judgement +{ /// Returns `true` if this judgement is indicative of a deposit being currently held. This means /// it should not be cleared or replaced except by an operation which utilizes the deposit. 
pub(crate) fn has_deposit(&self) -> bool { @@ -159,14 +154,14 @@ impl< #[repr(u64)] #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, BitFlags, RuntimeDebug)] pub enum IdentityField { - Display = 0b0000000000000000000000000000000000000000000000000000000000000001, - Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, - Web = 0b0000000000000000000000000000000000000000000000000000000000000100, - Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, - Email = 0b0000000000000000000000000000000000000000000000000000000000010000, + Display = 0b0000000000000000000000000000000000000000000000000000000000000001, + Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, + Web = 0b0000000000000000000000000000000000000000000000000000000000000100, + Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, + Email = 0b0000000000000000000000000000000000000000000000000000000000010000, PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, - Image = 0b0000000000000000000000000000000000000000000000000000000001000000, - Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, + Image = 0b0000000000000000000000000000000000000000000000000000000001000000, + Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, } impl MaxEncodedLen for IdentityField { @@ -202,7 +197,9 @@ impl Decode for IdentityFields { /// /// NOTE: This should be stored at the end of the storage item to facilitate the addition of extra /// fields in a backwards compatible way through a specialized `Decode` impl. -#[derive(CloneNoBound, Encode, Decode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound)] +#[derive( + CloneNoBound, Encode, Decode, Eq, MaxEncodedLen, PartialEqNoBound, RuntimeDebugNoBound, +)] #[codec(mel_bound(FieldLimit: Get))] #[cfg_attr(test, derive(frame_support::DefaultNoBound))] pub struct IdentityInfo> { @@ -277,23 +274,27 @@ pub struct Registration< pub info: IdentityInfo, } -impl < - Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add, - MaxJudgements: Get, - MaxAdditionalFields: Get, -> Registration { +impl< + Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq + Zero + Add, + MaxJudgements: Get, + MaxAdditionalFields: Get, + > Registration +{ pub(crate) fn total_deposit(&self) -> Balance { - self.deposit + self.judgements.iter() - .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) - .fold(Zero::zero(), |a, i| a + i) + self.deposit + + self.judgements + .iter() + .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) + .fold(Zero::zero(), |a, i| a + i) } } impl< - Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq, - MaxJudgements: Get, - MaxAdditionalFields: Get, -> Decode for Registration { + Balance: Encode + Decode + MaxEncodedLen + Copy + Clone + Debug + Eq + PartialEq, + MaxJudgements: Get, + MaxAdditionalFields: Get, + > Decode for Registration +{ fn decode(input: &mut I) -> sp_std::result::Result { let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; Ok(Self { judgements, deposit, info }) @@ -304,7 +305,7 @@ impl< #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, MaxEncodedLen)] pub struct RegistrarInfo< Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, - AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq + AccountId: Encode + 
Decode + Clone + Debug + Eq + PartialEq, > { /// The account of the registrar. pub account: AccountId, diff --git a/substrate/frame/identity/src/weights.rs b/substrate/frame/identity/src/weights.rs index f283b2869bdfc6cf1567afbb6472f0819b3952e2..b23df125c23b559e666f64ec14ae451d83e5f592 100644 --- a/substrate/frame/identity/src/weights.rs +++ b/substrate/frame/identity/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/im-online/src/benchmarking.rs b/substrate/frame/im-online/src/benchmarking.rs index 5ab4d16c7fe087c713b0da31331aa51eaaf151f6..ec53ec534850cb44e36cb166e46586aabf5f3f3f 100644 --- a/substrate/frame/im-online/src/benchmarking.rs +++ b/substrate/frame/im-online/src/benchmarking.rs @@ -21,22 +21,27 @@ use super::*; -use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; -use sp_core::OpaquePeerId; -use sp_core::offchain::OpaqueMultiaddr; -use sp_runtime::traits::{ValidateUnsigned, Zero}; -use sp_runtime::transaction_validity::TransactionSource; use frame_support::traits::UnfilteredDispatchable; +use frame_system::RawOrigin; +use sp_core::{offchain::OpaqueMultiaddr, OpaquePeerId}; +use sp_runtime::{ + traits::{ValidateUnsigned, Zero}, + transaction_validity::TransactionSource, +}; use crate::Pallet as ImOnline; const MAX_KEYS: u32 = 1000; const MAX_EXTERNAL_ADDRESSES: u32 = 100; -pub fn create_heartbeat(k: u32, e: u32) -> - Result<(crate::Heartbeat, ::Signature), &'static str> -{ +pub fn create_heartbeat( + k: u32, + e: u32, +) -> Result< + (crate::Heartbeat, ::Signature), + &'static str, +> { let mut keys = Vec::new(); for _ in 0..k { keys.push(T::AuthorityId::generate_pair(None)); @@ -51,12 +56,12 @@ pub fn create_heartbeat(k: u32, e: u32) -> block_number: T::BlockNumber::zero(), network_state, session_index: 0, - authority_index: k-1, + authority_index: k - 1, validators_len: keys.len() as u32, }; let encoded_heartbeat = input_heartbeat.encode(); - let authority_id = keys.get((k-1) as usize).ok_or("out of range")?; + let authority_id = keys.get((k - 1) as usize).ok_or("out of range")?; let signature = authority_id.sign(&encoded_heartbeat).ok_or("couldn't make signature")?; Ok((input_heartbeat, signature)) @@ -91,9 +96,4 @@ benchmarks! { } } - -impl_benchmark_test_suite!( - ImOnline, - crate::mock::new_test_ext(), - crate::mock::Runtime, -); +impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime,); diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index 90ba04f3b60dd6bba6da78be2b67ce1fbac2a153..99500ece837f70673e237eff244cd4ee6c48b2d4 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -69,31 +69,30 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; mod mock; mod tests; -mod benchmarking; pub mod weights; +use codec::{Decode, Encode}; +use frame_support::traits::{ + EstimateNextSessionRotation, OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification, +}; +use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +pub use pallet::*; use sp_application_crypto::RuntimeAppPublic; -use codec::{Encode, Decode}; use sp_core::offchain::OpaqueNetworkState; -use sp_std::prelude::*; -use sp_std::convert::TryInto; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, traits::{AtLeast32BitUnsigned, Convert, Saturating, TrailingZeroInput}, - Perbill, Permill, PerThing, RuntimeDebug, SaturatedConversion, + PerThing, Perbill, Permill, RuntimeDebug, SaturatedConversion, }; use sp_staking::{ + offence::{Kind, Offence, ReportOffence}, SessionIndex, - offence::{ReportOffence, Offence, Kind}, }; -use frame_support::traits::{ - EstimateNextSessionRotation, OneSessionHandler, ValidatorSet, ValidatorSetWithIdentification, -}; -use frame_system::offchain::{SendTransactionTypes, SubmitTransaction}; +use sp_std::{convert::TryInto, prelude::*}; pub use weights::WeightInfo; -pub use pallet::*; pub mod sr25519 { mod app_sr25519 { @@ -115,7 +114,7 @@ pub mod sr25519 { pub mod ed25519 { mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::IM_ONLINE, ed25519}; + use sp_application_crypto::{app_crypto, ed25519, key_types::IM_ONLINE}; app_crypto!(ed25519, IM_ONLINE); } @@ -185,8 +184,7 @@ enum OffchainErr { impl sp_std::fmt::Debug for OffchainErr { fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { match *self { - OffchainErr::TooEarly => - write!(fmt, "Too early to send heartbeat."), + OffchainErr::TooEarly => write!(fmt, "Too early to send heartbeat."), OffchainErr::WaitingForInclusion(ref block) => write!(fmt, "Heartbeat already sent at {:?}. Waiting for inclusion.", block), OffchainErr::AlreadyOnline(auth_idx) => @@ -204,7 +202,8 @@ pub type AuthIndex = u32; /// Heartbeat which is sent/received. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Heartbeat - where BlockNumber: PartialEq + Eq + Decode + Encode, +where + BlockNumber: PartialEq + Eq + Decode + Encode, { /// Block number at the time heartbeat is created.. pub block_number: BlockNumber, @@ -219,31 +218,32 @@ pub struct Heartbeat } /// A type for representing the validator id in a session. -pub type ValidatorId = < - ::ValidatorSet as ValidatorSet<::AccountId> ->::ValidatorId; +pub type ValidatorId = <::ValidatorSet as ValidatorSet< + ::AccountId, +>>::ValidatorId; /// A tuple of (ValidatorId, Identification) where `Identification` is the full identification of `ValidatorId`. 
pub type IdentificationTuple = ( ValidatorId, - <::ValidatorSet as - ValidatorSetWithIdentification<::AccountId>>::Identification, + <::ValidatorSet as ValidatorSetWithIdentification< + ::AccountId, + >>::Identification, ); type OffchainResult = Result::BlockNumber>>; #[frame_support::pallet] pub mod pallet { - use frame_support::{pallet_prelude::*, traits::Get}; - use frame_system::{pallet_prelude::*, ensure_none}; + use super::*; + use frame_support::{pallet_prelude::*, traits::Get, Parameter}; + use frame_system::{ensure_none, pallet_prelude::*}; use sp_runtime::{ - traits::{Member, MaybeSerializeDeserialize}, + traits::{MaybeSerializeDeserialize, Member}, transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, ValidTransaction, + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, }, }; - use frame_support::Parameter; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -252,7 +252,12 @@ pub mod pallet { #[pallet::config] pub trait Config: SendTransactionTypes> + frame_system::Config { /// The identifier type for an authority. - type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord + MaybeSerializeDeserialize; + type AuthorityId: Member + + Parameter + + RuntimeAppPublic + + Default + + Ord + + MaybeSerializeDeserialize; /// The overarching event type. type Event: From> + IsType<::Event>; @@ -331,14 +336,8 @@ pub mod pallet { /// `offchain::OpaqueNetworkState`. #[pallet::storage] #[pallet::getter(fn received_heartbeats)] - pub(crate) type ReceivedHeartbeats = StorageDoubleMap< - _, - Twox64Concat, - SessionIndex, - Twox64Concat, - AuthIndex, - Vec, - >; + pub(crate) type ReceivedHeartbeats = + StorageDoubleMap<_, Twox64Concat, SessionIndex, Twox64Concat, AuthIndex, Vec>; /// For each session index, we keep a mapping of `ValidatorId` to the /// number of blocks authored by the given authority. 
@@ -362,9 +361,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - GenesisConfig { - keys: Default::default(), - } + GenesisConfig { keys: Default::default() } } } @@ -402,10 +399,8 @@ pub mod pallet { ensure_none(origin)?; let current_session = T::ValidatorSet::session_index(); - let exists = ReceivedHeartbeats::::contains_key( - ¤t_session, - &heartbeat.authority_index - ); + let exists = + ReceivedHeartbeats::::contains_key(¤t_session, &heartbeat.authority_index); let keys = Keys::::get(); let public = keys.get(heartbeat.authority_index as usize); if let (false, Some(public)) = (exists, public) { @@ -415,7 +410,7 @@ pub mod pallet { ReceivedHeartbeats::::insert( ¤t_session, &heartbeat.authority_index, - &network_state + &network_state, ); Ok(()) @@ -463,19 +458,19 @@ pub mod pallet { if let Call::heartbeat(heartbeat, signature) = call { if >::is_online(heartbeat.authority_index) { // we already received a heartbeat for this authority - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // check if session index from heartbeat is recent let current_session = T::ValidatorSet::session_index(); if heartbeat.session_index != current_session { - return InvalidTransaction::Stale.into(); + return InvalidTransaction::Stale.into() } // verify that the incoming (unverified) pubkey is actually an authority id let keys = Keys::::get(); if keys.len() as u32 != heartbeat.validators_len { - return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into(); + return InvalidTransaction::Custom(INVALID_VALIDATORS_LEN).into() } let authority_id = match keys.get(heartbeat.authority_index as usize) { Some(id) => id, @@ -488,7 +483,7 @@ pub mod pallet { }); if !signature_valid { - return InvalidTransaction::BadProof.into(); + return InvalidTransaction::BadProof.into() } ValidTransaction::with_tag_prefix("ImOnline") @@ -511,9 +506,8 @@ pub mod pallet { /// Keep track of number of authored blocks per authority, uncles are counted as /// well since they're a valid proof of being online. -impl< - T: Config + pallet_authorship::Config, -> pallet_authorship::EventHandler, T::BlockNumber> for Pallet +impl + pallet_authorship::EventHandler, T::BlockNumber> for Pallet { fn note_author(author: ValidatorId) { Self::note_authorship(author); @@ -533,7 +527,7 @@ impl Pallet { let current_validators = T::ValidatorSet::validators(); if authority_index >= current_validators.len() as u32 { - return false; + return false } let authority = ¤t_validators[authority_index as usize]; @@ -545,10 +539,7 @@ impl Pallet { let current_session = T::ValidatorSet::session_index(); ReceivedHeartbeats::::contains_key(¤t_session, &authority_index) || - AuthoredBlocks::::get( - ¤t_session, - authority, - ) != 0 + AuthoredBlocks::::get(¤t_session, authority) != 0 } /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in @@ -562,11 +553,7 @@ impl Pallet { fn note_authorship(author: ValidatorId) { let current_session = T::ValidatorSet::session_index(); - AuthoredBlocks::::mutate( - ¤t_session, - author, - |authored| *authored += 1, - ); + AuthoredBlocks::::mutate(¤t_session, author, |authored| *authored += 1); } pub(crate) fn send_heartbeats( @@ -602,8 +589,8 @@ impl Pallet { // haven't sent an heartbeat yet we'll send one unconditionally. the idea is to prevent // all nodes from sending the heartbeats at the same block and causing a temporary (but // deterministic) spike in transactions. 
- progress >= START_HEARTBEAT_FINAL_PERIOD - || progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) + progress >= START_HEARTBEAT_FINAL_PERIOD || + progress >= START_HEARTBEAT_RANDOM_PERIOD && random_choice(progress) } else { // otherwise we fallback to using the block number calculated at the beginning // of the session that should roughly correspond to the middle of the session @@ -612,23 +599,21 @@ impl Pallet { }; if !should_heartbeat { - return Err(OffchainErr::TooEarly); + return Err(OffchainErr::TooEarly) } let session_index = T::ValidatorSet::session_index(); let validators_len = Keys::::decode_len().unwrap_or_default() as u32; - Ok( - Self::local_authority_keys().map(move |(authority_index, key)| { - Self::send_single_heartbeat( - authority_index, - key, - session_index, - block_number, - validators_len, - ) - }), - ) + Ok(Self::local_authority_keys().map(move |(authority_index, key)| { + Self::send_single_heartbeat( + authority_index, + key, + session_index, + block_number, + validators_len, + ) + })) } fn send_single_heartbeat( @@ -640,8 +625,8 @@ impl Pallet { ) -> OffchainResult { // A helper function to prepare heartbeat call. let prepare_heartbeat = || -> OffchainResult> { - let network_state = sp_io::offchain::network_state() - .map_err(|_| OffchainErr::NetworkState)?; + let network_state = + sp_io::offchain::network_state().map_err(|_| OffchainErr::NetworkState)?; let heartbeat_data = Heartbeat { block_number, network_state, @@ -656,35 +641,30 @@ impl Pallet { }; if Self::is_online(authority_index) { - return Err(OffchainErr::AlreadyOnline(authority_index)); + return Err(OffchainErr::AlreadyOnline(authority_index)) } // acquire lock for that authority at current heartbeat to make sure we don't // send concurrent heartbeats. - Self::with_heartbeat_lock( - authority_index, - session_index, - block_number, - || { - let call = prepare_heartbeat()?; - log::info!( - target: "runtime::im-online", - "[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}", - authority_index, - block_number, - session_index, - call, - ); + Self::with_heartbeat_lock(authority_index, session_index, block_number, || { + let call = prepare_heartbeat()?; + log::info!( + target: "runtime::im-online", + "[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}", + authority_index, + block_number, + session_index, + call, + ); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) - .map_err(|_| OffchainErr::SubmitTransaction)?; + SubmitTransaction::>::submit_unsigned_transaction(call.into()) + .map_err(|_| OffchainErr::SubmitTransaction)?; - Ok(()) - }, - ) + Ok(()) + }) } - fn local_authority_keys() -> impl Iterator { + fn local_authority_keys() -> impl Iterator { // on-chain storage // // At index `idx`: @@ -699,13 +679,12 @@ impl Pallet { local_keys.sort(); - authorities.into_iter() - .enumerate() - .filter_map(move |(index, authority)| { - local_keys.binary_search(&authority) - .ok() - .map(|location| (index as u32, local_keys[location].clone())) - }) + authorities.into_iter().enumerate().filter_map(move |(index, authority)| { + local_keys + .binary_search(&authority) + .ok() + .map(|location| (index as u32, local_keys[location].clone())) + }) } fn with_heartbeat_lock( @@ -722,24 +701,21 @@ impl Pallet { let storage = StorageValueRef::persistent(&key); let res = storage.mutate( |status: Result>, StorageRetrievalError>| { - // Check if there is already a lock for that particular block. 
- // This means that the heartbeat has already been sent, and we are just waiting - // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD - // we will re-send it. - match status { - // we are still waiting for inclusion. - Ok(Some(status)) if status.is_recent(session_index, now) => { - Err(OffchainErr::WaitingForInclusion(status.sent_at)) - }, - // attempt to set new status - _ => Ok(HeartbeatStatus { - session_index, - sent_at: now, - }), - } - }); + // Check if there is already a lock for that particular block. + // This means that the heartbeat has already been sent, and we are just waiting + // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD + // we will re-send it. + match status { + // we are still waiting for inclusion. + Ok(Some(status)) if status.is_recent(session_index, now) => + Err(OffchainErr::WaitingForInclusion(status.sent_at)), + // attempt to set new status + _ => Ok(HeartbeatStatus { session_index, sent_at: now }), + } + }, + ); if let Err(MutateStorageError::ValueFunctionFailed(err)) = res { - return Err(err); + return Err(err) } let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; @@ -777,14 +753,16 @@ impl OneSessionHandler for Pallet { type Key = T::AuthorityId; fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator + where + I: Iterator, { let keys = validators.map(|x| x.1).collect::>(); Self::initialize_keys(&keys); } fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_validators: I) - where I: Iterator + where + I: Iterator, { // Tell the offchain worker to start making the next session's heartbeats. // Since we consider producing blocks as being online, @@ -802,14 +780,16 @@ impl OneSessionHandler for Pallet { let keys = Keys::::get(); let current_validators = T::ValidatorSet::validators(); - let offenders = current_validators.into_iter().enumerate() - .filter(|(index, id)| - !Self::is_online_aux(*index as u32, id) - ).filter_map(|(_, id)| + let offenders = current_validators + .into_iter() + .enumerate() + .filter(|(index, id)| !Self::is_online_aux(*index as u32, id)) + .filter_map(|(_, id)| { >::IdentificationOf::convert( id.clone() ).map(|full_id| (id, full_id)) - ).collect::>>(); + }) + .collect::>>(); // Remove all received heartbeats and number of authored blocks from the // current session, they have already been processed and won't be needed diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index 3d7d6d73cd83a3f5f3a36db75ddeec5c9233896e..a04da49c6526db56f853aacf4eff52567c7e50d9 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -24,9 +24,11 @@ use std::cell::RefCell; use frame_support::{parameter_types, weights::Weight}; use pallet_session::historical as pallet_session_historical; use sp_core::H256; -use sp_runtime::testing::{Header, TestXt, UintAuthorityId}; -use sp_runtime::traits::{BlakeTwo256, ConvertInto, IdentityLookup}; -use sp_runtime::{Perbill, Permill}; +use sp_runtime::{ + testing::{Header, TestXt, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, Permill, +}; use sp_staking::{ offence::{OffenceError, ReportOffence}, SessionIndex, @@ -70,13 +72,11 @@ impl pallet_session::SessionManager for TestSessionManager { impl pallet_session::historical::SessionManager for TestSessionManager { fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| l - .borrow_mut() - .take() - .map(|validators| { - 
validators.iter().map(|v| (*v, *v)).collect() - }) - ) + VALIDATORS.with(|l| { + l.borrow_mut() + .take() + .map(|validators| validators.iter().map(|v| (*v, *v)).collect()) + }) } fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} @@ -105,9 +105,7 @@ impl ReportOffence for OffenceHandler { } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); t.into() } @@ -154,8 +152,9 @@ parameter_types! { impl pallet_session::Config for Runtime { type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type SessionHandler = (ImOnline, ); + type SessionManager = + pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = (ImOnline,); type ValidatorId = u64; type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; @@ -230,7 +229,8 @@ impl Config for Runtime { type WeightInfo = (); } -impl frame_system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime +where Call: From, { type OverarchingCall = Call; diff --git a/substrate/frame/im-online/src/tests.rs b/substrate/frame/im-online/src/tests.rs index 5fb8fd3a791e96ffa36d12e0c8281c1af4e9c248..30af2d31fda3a4b015d947d81c497a25b9ec5c40 100644 --- a/substrate/frame/im-online/src/tests.rs +++ b/substrate/frame/im-online/src/tests.rs @@ -21,23 +21,23 @@ use super::*; use crate::mock::*; -use sp_core::OpaquePeerId; -use sp_core::offchain::{ - OffchainDbExt, - OffchainWorkerExt, - TransactionPoolExt, - testing::{TestOffchainExt, TestTransactionPoolExt}, +use frame_support::{assert_noop, dispatch}; +use sp_core::{ + offchain::{ + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, + OpaquePeerId, +}; +use sp_runtime::{ + testing::UintAuthorityId, + transaction_validity::{InvalidTransaction, TransactionValidityError}, }; -use frame_support::{dispatch, assert_noop}; -use sp_runtime::{testing::UintAuthorityId, transaction_validity::{TransactionValidityError, InvalidTransaction}}; #[test] fn test_unresponsiveness_slash_fraction() { // A single case of unresponsiveness is not slashed. 
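A note on the `with_heartbeat_lock` hunks above: they only reformat the code, but the pattern they touch deserves spelling out. The offchain worker uses `StorageValueRef::mutate` over local storage as a best-effort lock, so a heartbeat is not re-sent while a previous one is still awaiting inclusion. A minimal sketch of that pattern follows; `Guard`, `try_acquire`, and `stale_after` are illustrative names rather than pallet API, and the function assumes it runs inside an offchain-worker context.

use codec::{Decode, Encode};
use sp_runtime::offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef};

#[derive(Encode, Decode)]
struct Guard {
    sent_at: u32,
}

// Best-effort lock over offchain local storage: refuse to proceed while a
// recent guard exists, otherwise atomically write a fresh one.
fn try_acquire(key: &[u8], now: u32, stale_after: u32) -> Result<Guard, &'static str> {
    let storage = StorageValueRef::persistent(key);
    let res = storage.mutate(|old: Result<Option<Guard>, StorageRetrievalError>| match old {
        // A recent guard means a heartbeat is already in flight.
        Ok(Some(guard)) if now.saturating_sub(guard.sent_at) < stale_after =>
            Err("waiting for inclusion"),
        // No guard, a stale guard, or an undecodable value: take the lock.
        _ => Ok(Guard { sent_at: now }),
    });
    match res {
        Ok(guard) => Ok(guard),
        Err(MutateStorageError::ValueFunctionFailed(e)) => Err(e),
        // Another worker raced us between the read and the write.
        Err(MutateStorageError::ConcurrentModification(_)) => Err("failed to acquire lock"),
    }
}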
- assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(1, 50), - Perbill::zero(), - ); + assert_eq!(UnresponsivenessOffence::<()>::slash_fraction(1, 50), Perbill::zero(),); assert_eq!( UnresponsivenessOffence::<()>::slash_fraction(5, 50), @@ -75,17 +75,17 @@ fn should_report_offline_validators() { // then let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 2, - validator_set_count: 3, - offenders: vec![ - (1, 1), - (2, 2), - (3, 3), - ], - }) - ]); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 2, + validator_set_count: 3, + offenders: vec![(1, 1), (2, 2), (3, 3),], + } + )] + ); // should not report when heartbeat is sent for (idx, v) in validators.into_iter().take(4).enumerate() { @@ -95,16 +95,17 @@ fn should_report_offline_validators() { // then let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 3, - validator_set_count: 6, - offenders: vec![ - (5, 5), - (6, 6), - ], - }) - ]); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 3, + validator_set_count: 6, + offenders: vec![(5, 5), (6, 6),], + } + )] + ); }); } @@ -129,17 +130,15 @@ fn heartbeat( }; let signature = id.sign(&heartbeat.encode()).unwrap(); - ImOnline::pre_dispatch(&crate::Call::heartbeat(heartbeat.clone(), signature.clone())) - .map_err(|e| match e { - TransactionValidityError::Invalid(InvalidTransaction::Custom(INVALID_VALIDATORS_LEN)) => - "invalid validators len", + ImOnline::pre_dispatch(&crate::Call::heartbeat(heartbeat.clone(), signature.clone())).map_err( + |e| match e { + TransactionValidityError::Invalid(InvalidTransaction::Custom( + INVALID_VALIDATORS_LEN, + )) => "invalid validators len", e @ _ => <&'static str>::from(e), - })?; - ImOnline::heartbeat( - Origin::none(), - heartbeat, - signature, - ) + }, + )?; + ImOnline::heartbeat(Origin::none(), heartbeat, signature) } #[test] @@ -191,8 +190,14 @@ fn late_heartbeat_and_invalid_keys_len_should_fail() { assert_eq!(Session::validators(), vec![1, 2, 3]); // when - assert_noop!(heartbeat(1, 3, 0, 1.into(), Session::validators()), "Transaction is outdated"); - assert_noop!(heartbeat(1, 1, 0, 1.into(), Session::validators()), "Transaction is outdated"); + assert_noop!( + heartbeat(1, 3, 0, 1.into(), Session::validators()), + "Transaction is outdated" + ); + assert_noop!( + heartbeat(1, 1, 0, 1.into(), Session::validators()), + "Transaction is outdated" + ); // invalid validators_len assert_noop!(heartbeat(1, 2, 0, 1.into(), vec![]), "invalid validators len"); @@ -236,13 +241,16 @@ fn should_generate_heartbeats() { e => panic!("Unexpected call: {:?}", e), }; - assert_eq!(heartbeat, Heartbeat { - block_number: block, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 2, - validators_len: 3, - }); + assert_eq!( + heartbeat, + Heartbeat { + block_number: block, + network_state: sp_io::offchain::network_state().unwrap(), + session_index: 2, + authority_index: 2, + validators_len: 3, + } + ); }); } @@ -348,13 +356,16 @@ fn should_not_send_a_report_if_already_online() { e => panic!("Unexpected call: {:?}", e), }; - assert_eq!(heartbeat, Heartbeat { - block_number: 4, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 0, - validators_len: 3, - }); + assert_eq!( + heartbeat, + Heartbeat { + block_number: 4, + network_state: 
sp_io::offchain::network_state().unwrap(), + session_index: 2, + authority_index: 0, + validators_len: 3, + } + ); }); } @@ -424,10 +435,7 @@ fn should_handle_non_linear_session_progress() { // if we don't have valid results for the current session progres then // we'll fallback to `HeartbeatAfter` and only heartbeat on block 5. MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); - assert_eq!( - ImOnline::send_heartbeats(2).err(), - Some(OffchainErr::TooEarly), - ); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly),); MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(None)); assert!(ImOnline::send_heartbeats(5).ok().is_some()); @@ -453,11 +461,9 @@ fn test_does_not_heartbeat_early_in_the_session() { ext.execute_with(|| { // mock current session progress as being 5%. we only randomly start // heartbeating after 10% of the session has elapsed. - MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); - assert_eq!( - ImOnline::send_heartbeats(2).err(), - Some(OffchainErr::TooEarly), - ); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(0.05)))); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly),); }); } @@ -475,8 +481,8 @@ fn test_probability_of_heartbeating_increases_with_session_progress() { // the average session length is 100 blocks, therefore the residual // probability of sending a heartbeat is 1% MOCK_AVERAGE_SESSION_LENGTH.with(|p| *p.borrow_mut() = Some(100)); - MOCK_CURRENT_SESSION_PROGRESS.with(|p| *p.borrow_mut() = - Some(Some(Permill::from_float(progress)))); + MOCK_CURRENT_SESSION_PROGRESS + .with(|p| *p.borrow_mut() = Some(Some(Permill::from_float(progress)))); let mut seed = [0u8; 32]; let encoded = ((random * Permill::ACCURACY as f64) as u32).encode(); @@ -486,10 +492,7 @@ fn test_probability_of_heartbeating_increases_with_session_progress() { let assert_too_early = |progress, random| { set_test(progress, random); - assert_eq!( - ImOnline::send_heartbeats(2).err(), - Some(OffchainErr::TooEarly), - ); + assert_eq!(ImOnline::send_heartbeats(2).err(), Some(OffchainErr::TooEarly),); }; let assert_heartbeat_ok = |progress, random| { diff --git a/substrate/frame/im-online/src/weights.rs b/substrate/frame/im-online/src/weights.rs index 6a1f575b856c52b0a60409f0229fbefc089bbe3e..5f04a3637d1642e1d378a0dd8a5a445cb42f68a7 100644 --- a/substrate/frame/im-online/src/weights.rs +++ b/substrate/frame/im-online/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/indices/src/benchmarking.rs b/substrate/frame/indices/src/benchmarking.rs index 625a994af38f674574716ec4b483f2d75a78bd7e..6829a660516018deeae8271bb605dcdf14b3a7d2 100644 --- a/substrate/frame/indices/src/benchmarking.rs +++ b/substrate/frame/indices/src/benchmarking.rs @@ -20,8 +20,8 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Pallet as Indices; @@ -93,9 +93,4 @@ benchmarks! 
{ // TODO in another PR: lookup and unlookup trait weights (not critical) } - -impl_benchmark_test_suite!( - Indices, - crate::mock::new_test_ext(), - crate::mock::Test, -); +impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/substrate/frame/indices/src/lib.rs b/substrate/frame/indices/src/lib.rs index 778173dbc971f742233526671cc099325b73fd10..ced8c1e0616524f3883205d6c63ad0eca767184f 100644 --- a/substrate/frame/indices/src/lib.rs +++ b/substrate/frame/indices/src/lib.rs @@ -20,36 +20,43 @@ #![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; mod mock; mod tests; -mod benchmarking; pub mod weights; -use sp_std::prelude::*; use codec::Codec; -use sp_runtime::MultiAddress; -use sp_runtime::traits::{ - StaticLookup, LookupError, Zero, Saturating, AtLeast32Bit +use frame_support::traits::{BalanceStatus::Reserved, Currency, ReservableCurrency}; +use sp_runtime::{ + traits::{AtLeast32Bit, LookupError, Saturating, StaticLookup, Zero}, + MultiAddress, }; -use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved}; +use sp_std::prelude::*; pub use weights::WeightInfo; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; /// The module's config trait. #[pallet::config] pub trait Config: frame_system::Config { /// Type used for storing an account's index; implies the maximum number of accounts the system /// can hold. - type AccountIndex: Parameter + Member + MaybeSerializeDeserialize + Codec + Default + AtLeast32Bit + Copy; + type AccountIndex: Parameter + + Member + + MaybeSerializeDeserialize + + Codec + + Default + + AtLeast32Bit + + Copy; /// The currency trait. type Currency: ReservableCurrency; @@ -263,7 +270,7 @@ pub mod pallet { } /// Old name generated by `decl_event`. - #[deprecated(note="use `Event` instead")] + #[deprecated(note = "use `Event` instead")] pub type RawEvent = Event; #[pallet::error] @@ -282,11 +289,8 @@ pub mod pallet { /// The lookup from index to account. #[pallet::storage] - pub type Accounts = StorageMap< - _, Blake2_128Concat, - T::AccountIndex, - (T::AccountId, BalanceOf, bool) - >; + pub type Accounts = + StorageMap<_, Blake2_128Concat, T::AccountIndex, (T::AccountId, BalanceOf, bool)>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -296,9 +300,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { - indices: Default::default(), - } + Self { indices: Default::default() } } } @@ -321,9 +323,7 @@ impl Pallet { } /// Lookup an address to get an Id, if there's one there. 
- pub fn lookup_address( - a: MultiAddress - ) -> Option { + pub fn lookup_address(a: MultiAddress) -> Option { match a { MultiAddress::Id(i) => Some(i), MultiAddress::Index(i) => Self::lookup_index(i), diff --git a/substrate/frame/indices/src/mock.rs b/substrate/frame/indices/src/mock.rs index 46c1d814acb6b5d4c73d4c487eaccdf531573aff..e026e36bc389db60ccbdef45e3065b2d6626d1fa 100644 --- a/substrate/frame/indices/src/mock.rs +++ b/substrate/frame/indices/src/mock.rs @@ -19,10 +19,10 @@ #![cfg(test)] -use sp_runtime::testing::Header; -use sp_core::H256; -use frame_support::parameter_types; use crate::{self as pallet_indices, Config}; +use frame_support::parameter_types; +use sp_core::H256; +use sp_runtime::testing::Header; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -101,8 +101,10 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ + pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/substrate/frame/indices/src/tests.rs b/substrate/frame/indices/src/tests.rs index 96b8c4acfcd2d722aedd22aa57b2931c9bd8a741..37df20e9b928874e17dd4db6b63afd0342edfd6b 100644 --- a/substrate/frame/indices/src/tests.rs +++ b/substrate/frame/indices/src/tests.rs @@ -19,15 +19,17 @@ #![cfg(test)] -use super::*; -use super::mock::*; -use frame_support::{assert_ok, assert_noop}; +use super::{mock::*, *}; +use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; #[test] fn claiming_should_work() { new_test_ext().execute_with(|| { - assert_noop!(Indices::claim(Some(0).into(), 0), BalancesError::::InsufficientBalance); + assert_noop!( + Indices::claim(Some(0).into(), 0), + BalancesError::::InsufficientBalance + ); assert_ok!(Indices::claim(Some(1).into(), 0)); assert_noop!(Indices::claim(Some(2).into(), 0), Error::::InUse); assert_eq!(Balances::reserved_balance(1), 1); diff --git a/substrate/frame/indices/src/weights.rs b/substrate/frame/indices/src/weights.rs index 559392d3d2ba204ae174aa62b5bdec667ad44cbc..6c49615a8521615f438e4e53860a7ccafdce1ab9 100644 --- a/substrate/frame/indices/src/weights.rs +++ b/substrate/frame/indices/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/lottery/src/benchmarking.rs b/substrate/frame/lottery/src/benchmarking.rs index 8fe91088b84ea5c704af4d53c01ba2459a943a36..cf58a5f81b10dc129d5a813de933d4c85ad8c555 100644 --- a/substrate/frame/lottery/src/benchmarking.rs +++ b/substrate/frame/lottery/src/benchmarking.rs @@ -21,9 +21,9 @@ use super::*; -use frame_system::RawOrigin; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_support::traits::{EnsureOrigin, OnInitialize, UnfilteredDispatchable}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; +use frame_system::RawOrigin; use sp_runtime::traits::{Bounded, Zero}; use crate::Pallet as Lottery; @@ -170,8 +170,4 @@ benchmarks! 
{ } } -impl_benchmark_test_suite!( - Lottery, - crate::mock::new_test_ext(), - crate::mock::Test, -); +impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/substrate/frame/lottery/src/lib.rs b/substrate/frame/lottery/src/lib.rs index c979500b36f06fe1b563fd5c90bf7486a906568b..e2e56860e6051e7e37fdcb057b8f9ffaa9333722 100644 --- a/substrate/frame/lottery/src/lib.rs +++ b/substrate/frame/lottery/src/lib.rs @@ -47,30 +47,30 @@ #![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -mod benchmarking; pub mod weights; -use sp_std::prelude::*; +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{DispatchResult, Dispatchable, GetDispatchInfo}, + ensure, + traits::{Currency, ExistenceRequirement::KeepAlive, Get, Randomness, ReservableCurrency}, + PalletId, RuntimeDebug, +}; +pub use pallet::*; use sp_runtime::{ - DispatchError, ArithmeticError, traits::{AccountIdConversion, Saturating, Zero}, + ArithmeticError, DispatchError, }; -use frame_support::{ - ensure, PalletId, RuntimeDebug, - dispatch::{Dispatchable, DispatchResult, GetDispatchInfo}, - traits::{ - Currency, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, Randomness, - }, -}; -use codec::{Encode, Decode}; +use sp_std::prelude::*; pub use weights::WeightInfo; -pub use pallet::*; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; // Any runtime call can be encoded into two bytes which represent the pallet and call index. // We use this to uniquely match someone's incoming call with the calls configured for the lottery. @@ -96,7 +96,9 @@ pub trait ValidateCall { } impl ValidateCall for () { - fn validate_call(_: &::Call) -> bool { false } + fn validate_call(_: &::Call) -> bool { + false + } } impl ValidateCall for Pallet { @@ -112,9 +114,9 @@ impl ValidateCall for Pallet { #[frame_support::pallet] pub mod pallet { - use frame_support::{Parameter, pallet_prelude::*, traits::EnsureOrigin, weights::Weight}; - use frame_system::{ensure_signed, pallet_prelude::*}; use super::*; + use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight, Parameter}; + use frame_system::{ensure_signed, pallet_prelude::*}; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -128,7 +130,10 @@ pub mod pallet { type PalletId: Get; /// A dispatchable call. - type Call: Parameter + Dispatchable + GetDispatchInfo + From>; + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From>; /// The currency trait. type Currency: ReservableCurrency; @@ -200,16 +205,13 @@ pub mod pallet { /// The configuration for the current lottery. #[pallet::storage] - pub(crate) type Lottery = StorageValue<_, LotteryConfig>>; + pub(crate) type Lottery = + StorageValue<_, LotteryConfig>>; /// Users who have purchased a ticket. (Lottery Index, Tickets Purchased) #[pallet::storage] - pub(crate) type Participants = StorageMap< - _, - Twox64Concat, T::AccountId, - (u32, Vec), - ValueQuery, - >; + pub(crate) type Participants = + StorageMap<_, Twox64Concat, T::AccountId, (u32, Vec), ValueQuery>; /// Total number of tickets sold. 
#[pallet::storage] @@ -232,9 +234,8 @@ pub mod pallet { fn on_initialize(n: T::BlockNumber) -> Weight { Lottery::::mutate(|mut lottery| -> Weight { if let Some(config) = &mut lottery { - let payout_block = config.start - .saturating_add(config.length) - .saturating_add(config.delay); + let payout_block = + config.start.saturating_add(config.length).saturating_add(config.delay); if payout_block <= n { let (lottery_account, lottery_balance) = Self::pot(); let ticket_count = TicketsCount::::get(); @@ -242,7 +243,12 @@ pub mod pallet { let winning_number = Self::choose_winner(ticket_count); let winner = Tickets::::get(winning_number).unwrap_or(lottery_account); // Not much we can do if this fails... - let res = T::Currency::transfer(&Self::account_id(), &winner, lottery_balance, KeepAlive); + let res = T::Currency::transfer( + &Self::account_id(), + &winner, + lottery_balance, + KeepAlive, + ); debug_assert!(res.is_ok()); Self::deposit_event(Event::::Winner(winner, lottery_balance)); @@ -340,13 +346,7 @@ pub mod pallet { let new_index = index.checked_add(1).ok_or(ArithmeticError::Overflow)?; let start = frame_system::Pallet::::block_number(); // Use new_index to more easily track everything with the current state. - *lottery = Some(LotteryConfig { - price, - start, - length, - delay, - repeat, - }); + *lottery = Some(LotteryConfig { price, start, length, delay, repeat }); LotteryIndex::::put(new_index); Ok(()) })?; @@ -389,8 +389,8 @@ impl Pallet { // The existential deposit is not part of the pot so lottery account never gets deleted. fn pot() -> (T::AccountId, BalanceOf) { let account_id = Self::account_id(); - let balance = T::Currency::free_balance(&account_id) - .saturating_sub(T::Currency::minimum_balance()); + let balance = + T::Currency::free_balance(&account_id).saturating_sub(T::Currency::minimum_balance()); (account_id, balance) } @@ -408,7 +408,9 @@ impl Pallet { // Convert a call to it's call index by encoding the call and taking the first two bytes. fn call_to_index(call: &::Call) -> Result { let encoded_call = call.encode(); - if encoded_call.len() < 2 { Err(Error::::EncodingFailed)? } + if encoded_call.len() < 2 { + Err(Error::::EncodingFailed)? + } return Ok((encoded_call[0], encoded_call[1])) } @@ -417,30 +419,39 @@ impl Pallet { // Check the call is valid lottery let config = Lottery::::get().ok_or(Error::::NotConfigured)?; let block_number = frame_system::Pallet::::block_number(); - ensure!(block_number < config.start.saturating_add(config.length), Error::::AlreadyEnded); + ensure!( + block_number < config.start.saturating_add(config.length), + Error::::AlreadyEnded + ); ensure!(T::ValidateCall::validate_call(call), Error::::InvalidCall); let call_index = Self::call_to_index(call)?; let ticket_count = TicketsCount::::get(); let new_ticket_count = ticket_count.checked_add(1).ok_or(ArithmeticError::Overflow)?; // Try to update the participant status - Participants::::try_mutate(&caller, |(lottery_index, participating_calls)| -> DispatchResult { - let index = LotteryIndex::::get(); - // If lottery index doesn't match, then reset participating calls and index. - if *lottery_index != index { - *participating_calls = Vec::new(); - *lottery_index = index; - } else { - // Check that user is not already participating under this call. - ensure!(!participating_calls.iter().any(|c| call_index == *c), Error::::AlreadyParticipating); - } - // Check user has enough funds and send it to the Lottery account. 
- T::Currency::transfer(caller, &Self::account_id(), config.price, KeepAlive)?; - // Create a new ticket. - TicketsCount::::put(new_ticket_count); - Tickets::::insert(ticket_count, caller.clone()); - participating_calls.push(call_index); - Ok(()) - })?; + Participants::::try_mutate( + &caller, + |(lottery_index, participating_calls)| -> DispatchResult { + let index = LotteryIndex::::get(); + // If lottery index doesn't match, then reset participating calls and index. + if *lottery_index != index { + *participating_calls = Vec::new(); + *lottery_index = index; + } else { + // Check that user is not already participating under this call. + ensure!( + !participating_calls.iter().any(|c| call_index == *c), + Error::::AlreadyParticipating + ); + } + // Check user has enough funds and send it to the Lottery account. + T::Currency::transfer(caller, &Self::account_id(), config.price, KeepAlive)?; + // Create a new ticket. + TicketsCount::::put(new_ticket_count); + Tickets::::insert(ticket_count, caller.clone()); + participating_calls.push(call_index); + Ok(()) + }, + )?; Self::deposit_event(Event::::TicketBought(caller.clone(), call_index)); @@ -452,9 +463,9 @@ impl Pallet { let mut random_number = Self::generate_random_number(0); // Best effort attempt to remove bias from modulus operator. - for i in 1 .. T::MaxGenerateRandom::get() { + for i in 1..T::MaxGenerateRandom::get() { if random_number < u32::MAX - u32::MAX % total { - break; + break } random_number = Self::generate_random_number(i); diff --git a/substrate/frame/lottery/src/mock.rs b/substrate/frame/lottery/src/mock.rs index 885e81bb32ea35d02a5991faa0960b303c4ec07e..253923de0d5e7ca0e0892a91696f9ef604b1ea23 100644 --- a/substrate/frame/lottery/src/mock.rs +++ b/substrate/frame/lottery/src/mock.rs @@ -25,13 +25,13 @@ use frame_support::{ traits::{OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; +use frame_system::EnsureRoot; use sp_core::H256; use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use frame_system::EnsureRoot; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -123,7 +123,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/substrate/frame/lottery/src/tests.rs b/substrate/frame/lottery/src/tests.rs index 38994b2864c62cb6e9b5eab45ecc114be4cfcbc0..800ae223d9739cfc9810f7a6f17c2116f7eefc15 100644 --- a/substrate/frame/lottery/src/tests.rs +++ b/substrate/frame/lottery/src/tests.rs @@ -18,13 +18,12 @@ //! Tests for the module. 
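Worth isolating from the `choose_winner` hunk above: the rejection loop that strips the bias out of the modulus operator. Taking `x % total` of a uniform `u32` over-weights the low residues unless draws near `u32::MAX` are rejected. A standalone sketch, with `next_random` as an illustrative stand-in for the pallet's `generate_random_number`:

// `total` must be non-zero. After `max_attempts` redraws the residual bias
// is simply accepted, matching the pallet's "best effort" comment.
fn choose_uniform(total: u32, max_attempts: u32, mut next_random: impl FnMut(u32) -> u32) -> u32 {
    let mut random_number = next_random(0);
    for i in 1..max_attempts {
        // Draws below this threshold map onto `0..total` uniformly; draws
        // at or above it would over-weight the low residues, so redraw.
        if random_number < u32::MAX - u32::MAX % total {
            break
        }
        random_number = next_random(i);
    }
    random_number % total
}

fn main() {
    // Deterministic stand-in RNG, for demonstration only.
    let winner = choose_uniform(7, 10, |i| i.wrapping_mul(2_654_435_761));
    assert!(winner < 7);
}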
use super::*; +use frame_support::{assert_noop, assert_ok}; use mock::{ - Lottery, Balances, Test, Origin, Call, SystemCall, BalancesCall, - new_test_ext, run_to_block + new_test_ext, run_to_block, Balances, BalancesCall, Call, Lottery, Origin, SystemCall, Test, }; -use sp_runtime::traits::{BadOrigin}; -use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; +use sp_runtime::traits::BadOrigin; #[test] fn initial_state() { @@ -86,13 +85,7 @@ fn basic_end_to_end_works() { assert_eq!(LotteryIndex::::get(), 2); assert_eq!( crate::Lottery::::get().unwrap(), - LotteryConfig { - price, - start: 25, - length, - delay, - repeat: true, - } + LotteryConfig { price, start: 25, length, delay, repeat: true } ); }); } @@ -184,10 +177,7 @@ fn buy_ticket_works_as_simple_passthrough() { ); let bad_origin_call = Box::new(Call::Balances(BalancesCall::force_transfer(0, 0, 0))); - assert_noop!( - Lottery::buy_ticket(Origin::signed(1), bad_origin_call), - BadOrigin, - ); + assert_noop!(Lottery::buy_ticket(Origin::signed(1), bad_origin_call), BadOrigin,); // User can call other txs, but doesn't get a ticket let remark_call = Box::new(Call::System(SystemCall::remark(b"hello, world!".to_vec()))); @@ -210,7 +200,6 @@ fn buy_ticket_works() { ]; assert_ok!(Lottery::set_calls(Origin::root(), calls)); - // Can't buy ticket before start let call = Box::new(Call::Balances(BalancesCall::transfer(2, 1))); assert_ok!(Lottery::buy_ticket(Origin::signed(1), call.clone())); diff --git a/substrate/frame/lottery/src/weights.rs b/substrate/frame/lottery/src/weights.rs index a73d0b667e351eff2d4c8e26b91529b8dd2fe7e4..1b191ef534595daf7c7c782a318ed07faee1e1f6 100644 --- a/substrate/frame/lottery/src/weights.rs +++ b/substrate/frame/lottery/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/membership/src/lib.rs b/substrate/frame/membership/src/lib.rs index c834ed23659e69453928c9bd87577c0bc7504a8d..ed0c78f82d262bae94046d65775948f2818f515e 100644 --- a/substrate/frame/membership/src/lib.rs +++ b/substrate/frame/membership/src/lib.rs @@ -23,12 +23,12 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains, SortedMembers, Get}, + decl_error, decl_event, decl_module, decl_storage, + traits::{ChangeMembers, Contains, EnsureOrigin, Get, InitializeMembers, SortedMembers}, }; use frame_system::ensure_signed; +use sp_std::prelude::*; pub mod weights; pub use weights::WeightInfo; @@ -321,10 +321,10 @@ impl, I: Instance> SortedMembers for Module { #[cfg(feature = "runtime-benchmarks")] mod benchmark { - use super::{*, Module as Membership}; + use super::{Module as Membership, *}; + use frame_benchmarking::{account, benchmarks_instance, impl_benchmark_test_suite, whitelist}; + use frame_support::{assert_ok, traits::EnsureOrigin}; use frame_system::RawOrigin; - use frame_support::{traits::EnsureOrigin, assert_ok}; - use frame_benchmarking::{benchmarks_instance, whitelist, account, impl_benchmark_test_suite}; const SEED: u32 = 0; @@ -467,10 +467,13 @@ mod tests { use super::*; use crate as pallet_membership; - use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; - use sp_core::H256; - use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; + use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types}; use frame_system::EnsureSignedBy; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -572,10 +575,12 @@ mod tests { pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. - pallet_membership::GenesisConfig::{ + pallet_membership::GenesisConfig:: { members: vec![10, 20, 30], - .. 
Default::default() - }.assimilate_storage(&mut t).unwrap(); + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } @@ -617,7 +622,10 @@ mod tests { fn add_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); - assert_noop!(Membership::add_member(Origin::signed(1), 10), Error::::AlreadyMember); + assert_noop!( + Membership::add_member(Origin::signed(1), 10), + Error::::AlreadyMember + ); assert_ok!(Membership::add_member(Origin::signed(1), 15)); assert_eq!(Membership::members(), vec![10, 15, 20, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); @@ -628,7 +636,10 @@ mod tests { fn remove_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); - assert_noop!(Membership::remove_member(Origin::signed(2), 15), Error::::NotMember); + assert_noop!( + Membership::remove_member(Origin::signed(2), 15), + Error::::NotMember + ); assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::remove_member(Origin::signed(2), 20)); assert_eq!(Membership::members(), vec![10, 30]); @@ -642,8 +653,14 @@ mod tests { fn swap_member_works() { new_test_ext().execute_with(|| { assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); - assert_noop!(Membership::swap_member(Origin::signed(3), 15, 25), Error::::NotMember); - assert_noop!(Membership::swap_member(Origin::signed(3), 10, 30), Error::::AlreadyMember); + assert_noop!( + Membership::swap_member(Origin::signed(3), 15, 25), + Error::::NotMember + ); + assert_noop!( + Membership::swap_member(Origin::signed(3), 10, 30), + Error::::AlreadyMember + ); assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); @@ -673,8 +690,14 @@ mod tests { fn change_key_works() { new_test_ext().execute_with(|| { assert_ok!(Membership::set_prime(Origin::signed(5), 10)); - assert_noop!(Membership::change_key(Origin::signed(3), 25), Error::::NotMember); - assert_noop!(Membership::change_key(Origin::signed(10), 20), Error::::AlreadyMember); + assert_noop!( + Membership::change_key(Origin::signed(3), 25), + Error::::NotMember + ); + assert_noop!( + Membership::change_key(Origin::signed(10), 20), + Error::::AlreadyMember + ); assert_ok!(Membership::change_key(Origin::signed(10), 40)); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); @@ -718,6 +741,8 @@ mod tests { pallet_membership::GenesisConfig:: { members: vec![1, 2, 3, 1], phantom: Default::default(), - }.build_storage().unwrap(); + } + .build_storage() + .unwrap(); } } diff --git a/substrate/frame/membership/src/weights.rs b/substrate/frame/membership/src/weights.rs index 8e2d8bb2661643678f639f03cba3a909e43a4d5c..bd2a09cb534c4780efa3572d0b8085ecc2b3649c 100644 --- a/substrate/frame/membership/src/weights.rs +++ b/substrate/frame/membership/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/merkle-mountain-range/primitives/src/lib.rs b/substrate/frame/merkle-mountain-range/primitives/src/lib.rs index 7b562656a1e040fdb46e0dc36791c9ca8133873e..c556583a9dd1abda85de75e157a6846234ec9a8c 100644 --- a/substrate/frame/merkle-mountain-range/primitives/src/lib.rs +++ 
b/substrate/frame/merkle-mountain-range/primitives/src/lib.rs @@ -21,7 +21,7 @@ #![warn(missing_docs)] use frame_support::RuntimeDebug; -use sp_runtime::traits::{self, Saturating, One}; +use sp_runtime::traits::{self, One, Saturating}; use sp_std::fmt; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; @@ -55,16 +55,10 @@ impl LeafDataProvider for () { /// current block hash is not available (since the block is not finished yet), /// we use the `parent_hash` here along with parent block number. impl LeafDataProvider for frame_system::Pallet { - type LeafData = ( - ::BlockNumber, - ::Hash - ); + type LeafData = (::BlockNumber, ::Hash); fn leaf_data() -> Self::LeafData { - ( - Self::block_number().saturating_sub(One::one()), - Self::parent_hash() - ) + (Self::block_number().saturating_sub(One::one()), Self::parent_hash()) } } @@ -130,7 +124,8 @@ mod encoding { fn encode_to(&self, dest: &mut T) { match self { Self::Data(l) => l.using_encoded( - |data| Either::<&[u8], &H::Output>::Left(data).encode_to(dest), false + |data| Either::<&[u8], &H::Output>::Left(data).encode_to(dest), + false, ), Self::Hash(h) => Either::<&[u8], &H::Output>::Right(h).encode_to(dest), } @@ -258,7 +253,8 @@ macro_rules! impl_leaf_data_for_tuple { /// Test functions implementation for `Compact, ...)>` #[cfg(test)] -impl Compact, DataOrHash)> where +impl Compact, DataOrHash)> +where H: traits::Hash, A: FullLeaf, B: FullLeaf, @@ -346,7 +342,7 @@ pub struct OpaqueLeaf( /// /// NOTE it DOES NOT include length prefix (like `Vec` encoding would). #[cfg_attr(feature = "std", serde(with = "sp_core::bytes"))] - pub Vec + pub Vec, ); impl OpaqueLeaf { @@ -474,25 +470,21 @@ mod tests { ]; // when - let encoded = cases - .iter() - .map(codec::Encode::encode) - .collect::>(); + let encoded = cases.iter().map(codec::Encode::encode).collect::>(); - let decoded = encoded - .iter() - .map(|x| Test::decode(&mut &**x)) - .collect::>(); + let decoded = encoded.iter().map(|x| Test::decode(&mut &**x)).collect::>(); // then - assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + assert_eq!( + decoded, + cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() + ); // check encoding correctness assert_eq!(&encoded[0], &hex_literal::hex!("00343048656c6c6f20576f726c6421")); assert_eq!( encoded[1].as_slice(), - hex_literal::hex!( - "01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd" - ).as_ref() + hex_literal::hex!("01c3e7ba6b511162fead58f2c8b5764ce869ed1118011ac37392522ed16720bbcd") + .as_ref() ); } @@ -519,10 +511,7 @@ mod tests { // when let c: TestCompact = Compact::new((a.clone(), b.clone())); - let d: TestCompact = Compact::new(( - Test::Hash(a.hash()), - Test::Hash(b.hash()), - )); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); // then assert_eq!(c.hash(), d.hash()); @@ -535,35 +524,28 @@ mod tests { let b = Test::Data("".into()); let c: TestCompact = Compact::new((a.clone(), b.clone())); - let d: TestCompact = Compact::new(( - Test::Hash(a.hash()), - Test::Hash(b.hash()), - )); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); let cases = vec![c, d.clone()]; // when - let encoded_compact = cases - .iter() - .map(|c| c.using_encoded(|x| x.to_vec(), true)) - .collect::>(); + let encoded_compact = + cases.iter().map(|c| c.using_encoded(|x| x.to_vec(), true)).collect::>(); - let encoded = cases - .iter() - .map(|c| c.using_encoded(|x| x.to_vec(), false)) - .collect::>(); + let encoded = + cases.iter().map(|c| 
c.using_encoded(|x| x.to_vec(), false)).collect::>(); let decoded_compact = encoded_compact .iter() .map(|x| TestCompact::decode(&mut &**x)) .collect::>(); - let decoded = encoded - .iter() - .map(|x| TestCompact::decode(&mut &**x)) - .collect::>(); + let decoded = encoded.iter().map(|x| TestCompact::decode(&mut &**x)).collect::>(); // then - assert_eq!(decoded, cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>()); + assert_eq!( + decoded, + cases.into_iter().map(Result::<_, codec::Error>::Ok).collect::>() + ); assert_eq!(decoded_compact, vec![Ok(d.clone()), Ok(d.clone())]); } @@ -575,10 +557,7 @@ mod tests { let b = Test::Data("".into()); let c: TestCompact = Compact::new((a.clone(), b.clone())); - let d: TestCompact = Compact::new(( - Test::Hash(a.hash()), - Test::Hash(b.hash()), - )); + let d: TestCompact = Compact::new((Test::Hash(a.hash()), Test::Hash(b.hash()))); let cases = vec![c, d.clone()]; let encoded_compact = cases @@ -587,16 +566,10 @@ mod tests { .map(OpaqueLeaf::from_encoded_leaf) .collect::>(); - let opaque = cases - .iter() - .map(OpaqueLeaf::from_leaf) - .collect::>(); + let opaque = cases.iter().map(OpaqueLeaf::from_leaf).collect::>(); // then - assert_eq!( - encoded_compact, - opaque, - ); + assert_eq!(encoded_compact, opaque,); } #[test] @@ -610,10 +583,7 @@ mod tests { let case3 = a.encode().encode(); // when - let encoded = vec![&case1, &case2] - .into_iter() - .map(|x| x.encode()) - .collect::>(); + let encoded = vec![&case1, &case2].into_iter().map(|x| x.encode()).collect::>(); let decoded = vec![&*encoded[0], &*encoded[1], &*case3] .into_iter() .map(|x| EncodableOpaqueLeaf::decode(&mut &*x)) diff --git a/substrate/frame/merkle-mountain-range/rpc/src/lib.rs b/substrate/frame/merkle-mountain-range/rpc/src/lib.rs index fb46fc6280b8a78246942ab65cd9012095967ef8..4719893778f6a402aa666cf9f7034d7b2ac5106d 100644 --- a/substrate/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/rpc/src/lib.rs @@ -26,14 +26,11 @@ use jsonrpc_core::{Error, ErrorCode, Result}; use jsonrpc_derive::rpc; use serde::{Deserialize, Serialize}; +use pallet_mmr_primitives::{Error as MmrError, Proof}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::Bytes; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT}, -}; -use pallet_mmr_primitives::{Error as MmrError, Proof}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use pallet_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -51,19 +48,12 @@ pub struct LeafProof { impl LeafProof { /// Create new `LeafProof` from given concrete `leaf` and `proof`. - pub fn new( - block_hash: BlockHash, - leaf: Leaf, - proof: Proof, - ) -> Self where + pub fn new(block_hash: BlockHash, leaf: Leaf, proof: Proof) -> Self + where Leaf: Encode, MmrHash: Encode, { - Self { - block_hash, - leaf: Bytes(leaf.encode()), - proof: Bytes(proof.encode()), - } + Self { block_hash, leaf: Bytes(leaf.encode()), proof: Bytes(proof.encode()) } } } @@ -95,21 +85,15 @@ pub struct Mmr { impl Mmr { /// Create new `Mmr` with the given reference to the client. 
pub fn new(client: Arc) -> Self { - Self { - client, - _marker: Default::default(), - } + Self { client, _marker: Default::default() } } } -impl MmrApi<::Hash,> for Mmr +impl MmrApi<::Hash> for Mmr where Block: BlockT, C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: MmrRuntimeApi< - Block, - MmrHash, - >, + C::Api: MmrRuntimeApi, MmrHash: Codec + Send + Sync + 'static, { fn generate_proof( @@ -120,8 +104,7 @@ where let api = self.client.runtime_api(); let block_hash = at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. - self.client.info().best_hash - ); + self.client.info().best_hash); let (leaf, proof) = api .generate_proof_with_context( @@ -202,11 +185,14 @@ mod tests { let expected = LeafProof { block_hash: H256::repeat_byte(0), leaf: Bytes(vec![1_u8, 2, 3, 4].encode()), - proof: Bytes(Proof { - leaf_index: 1, - leaf_count: 9, - items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], - }.encode()), + proof: Bytes( + Proof { + leaf_index: 1, + leaf_count: 9, + items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], + } + .encode(), + ), }; // when @@ -218,6 +204,5 @@ mod tests { // then assert_eq!(actual, expected); - } } diff --git a/substrate/frame/merkle-mountain-range/src/benchmarking.rs b/substrate/frame/merkle-mountain-range/src/benchmarking.rs index af7531a00bdc41aec36c81c9d10d38c5e9d762f6..97a880b222ec7187af78bf3cfdb6d6ad17298499 100644 --- a/substrate/frame/merkle-mountain-range/src/benchmarking.rs +++ b/substrate/frame/merkle-mountain-range/src/benchmarking.rs @@ -20,8 +20,8 @@ #![cfg_attr(not(feature = "std"), no_std)] use crate::*; -use frame_support::traits::OnInitialize; use frame_benchmarking::{benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_support::traits::OnInitialize; benchmarks_instance_pallet! { on_initialize { @@ -37,8 +37,4 @@ benchmarks_instance_pallet! { } } -impl_benchmark_test_suite!( - Pallet, - crate::tests::new_test_ext(), - crate::mock::Test, -); +impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test,); diff --git a/substrate/frame/merkle-mountain-range/src/default_weights.rs b/substrate/frame/merkle-mountain-range/src/default_weights.rs index 98bb404e3f3a1a7d9c8373c5a8434b75f7994815..6308975ce7d22e6901f158b9d0a9f04493454572 100644 --- a/substrate/frame/merkle-mountain-range/src/default_weights.rs +++ b/substrate/frame/merkle-mountain-range/src/default_weights.rs @@ -19,7 +19,8 @@ //! This file was not auto-generated. use frame_support::weights::{ - Weight, constants::{WEIGHT_PER_NANOS, RocksDbWeight as DbWeight}, + constants::{RocksDbWeight as DbWeight, WEIGHT_PER_NANOS}, + Weight, }; impl crate::WeightInfo for () { @@ -34,9 +35,6 @@ impl crate::WeightInfo for () { leaf_weight .saturating_add(hash_weight) .saturating_add(hook_weight) - .saturating_add(DbWeight::get().reads_writes( - 2 + peaks, - 2 + peaks, - )) + .saturating_add(DbWeight::get().reads_writes(2 + peaks, 2 + peaks)) } } diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index 307326b59b6575efb10efb337a22bea1ab4342d6..974b868f6105803158328d6c81c3a90071bfeda3 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -40,38 +40,37 @@ //! //! ## What for? //! -//! Primary use case for this pallet is to generate MMR root hashes, that can latter on be used by -//! BEEFY protocol (see ). -//! 
MMR root hashes along with BEEFY will make it possible to build Super Light Clients (SLC) of -//! Substrate-based chains. The SLC will be able to follow finality and can be shown proofs of more -//! details that happened on the source chain. -//! In that case the chain which contains the pallet generates the Root Hashes and Proofs, which -//! are then presented to another chain acting as a light client which can verify them. +//! Primary use case for this pallet is to generate MMR root hashes, that can latter on be used by +//! BEEFY protocol (see ). +//! MMR root hashes along with BEEFY will make it possible to build Super Light Clients (SLC) of +//! Substrate-based chains. The SLC will be able to follow finality and can be shown proofs of more +//! details that happened on the source chain. +//! In that case the chain which contains the pallet generates the Root Hashes and Proofs, which +//! are then presented to another chain acting as a light client which can verify them. //! -//! Secondary use case is to archive historical data, but still be able to retrieve them on-demand -//! if needed. For instance if parent block hashes are stored in the MMR it's possible at any point -//! in time to provide a MMR proof about some past block hash, while this data can be safely pruned -//! from on-chain storage. +//! Secondary use case is to archive historical data, but still be able to retrieve them on-demand +//! if needed. For instance if parent block hashes are stored in the MMR it's possible at any point +//! in time to provide a MMR proof about some past block hash, while this data can be safely pruned +//! from on-chain storage. //! //! NOTE This pallet is experimental and not proven to work in production. -//! #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; use frame_support::weights::Weight; use sp_runtime::traits; -mod default_weights; -mod mmr; #[cfg(any(feature = "runtime-benchmarks", test))] mod benchmarking; +mod default_weights; +mod mmr; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub use pallet_mmr_primitives as primitives; pub use pallet::*; +pub use pallet_mmr_primitives as primitives; pub trait WeightInfo { fn on_initialize(peaks: u64) -> Weight; @@ -79,9 +78,9 @@ pub trait WeightInfo { #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -116,8 +115,15 @@ pub mod pallet { /// /// This type is actually going to be stored in the MMR. /// Required to be provided again, to satisfy trait bounds for storage items. - type Hash: traits::Member + traits::MaybeSerializeDeserialize + sp_std::fmt::Debug - + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + codec::Codec + type Hash: traits::Member + + traits::MaybeSerializeDeserialize + + sp_std::fmt::Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + codec::Codec + codec::EncodeLike; /// Data stored in the leaf nodes. @@ -147,7 +153,8 @@ pub mod pallet { /// Latest MMR Root hash. #[pallet::storage] #[pallet::getter(fn mmr_root_hash)] - pub type RootHash, I: 'static = ()> = StorageValue<_, >::Hash, ValueQuery>; + pub type RootHash, I: 'static = ()> = + StorageValue<_, >::Hash, ValueQuery>; /// Current size of the MMR (number of leaves). #[pallet::storage] @@ -160,13 +167,8 @@ pub mod pallet { /// are pruned and only stored in the Offchain DB. 
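As the `Nodes` storage documentation above says, on-chain state keeps only the MMR peaks; inner nodes and leaves are pruned and must be fetched from the offchain database, which the `mmr/storage.rs` hunk further down reads via `sp_io::offchain::local_storage_get`. A hedged sketch of that read path; the `INDEXING_PREFIX` constant and the `(prefix, pos)` key derivation are assumptions modelled on `Pallet::offchain_key`, not copied from it.

use codec::{Decode, Encode};
use sp_core::offchain::StorageKind;

// Illustrative stand-in for the pallet's configured offchain prefix.
const INDEXING_PREFIX: &[u8] = b"indexing_prefix";

// Assumed key derivation: a static prefix plus the node position,
// SCALE-encoded as a tuple.
fn offchain_key(pos: u64) -> Vec<u8> {
    (INDEXING_PREFIX, pos).encode()
}

// Read a pruned node back from the PERSISTENT offchain DB, mirroring the
// `OffchainStorage` store implementation below. Only valid when called
// from an offchain-worker context.
fn get_pruned_node<N: Decode>(pos: u64) -> Option<N> {
    sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &offchain_key(pos))
        .and_then(|bytes| N::decode(&mut &*bytes).ok())
}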
#[pallet::storage] #[pallet::getter(fn mmr_peak)] - pub type Nodes, I: 'static = ()> = StorageMap< - _, - Identity, - u64, - >::Hash, - OptionQuery - >; + pub type Nodes, I: 'static = ()> = + StorageMap<_, Identity, u64, >::Hash, OptionQuery>; #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { @@ -211,7 +213,8 @@ pub fn verify_leaf_proof( root: H::Output, leaf: mmr::Node, proof: primitives::Proof, -) -> Result<(), primitives::Error> where +) -> Result<(), primitives::Error> +where H: traits::Hash, L: primitives::FullLeaf, { @@ -234,10 +237,9 @@ impl, I: 'static> Pallet { /// (Offchain Worker or Runtime API call), since it requires /// all the leaves to be present. /// It may return an error or panic if used incorrectly. - pub fn generate_proof(leaf_index: u64) -> Result< - (LeafOf, primitives::Proof<>::Hash>), - primitives::Error, - > { + pub fn generate_proof( + leaf_index: u64, + ) -> Result<(LeafOf, primitives::Proof<>::Hash>), primitives::Error> { let mmr: ModuleMmr = mmr::Mmr::new(Self::mmr_leaves()); mmr.generate_proof(leaf_index) } @@ -252,13 +254,12 @@ impl, I: 'static> Pallet { leaf: LeafOf, proof: primitives::Proof<>::Hash>, ) -> Result<(), primitives::Error> { - if proof.leaf_count > Self::mmr_leaves() - || proof.leaf_count == 0 - || proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() + if proof.leaf_count > Self::mmr_leaves() || + proof.leaf_count == 0 || + proof.items.len() as u32 > mmr::utils::NodesUtils::new(proof.leaf_count).depth() { - return Err(primitives::Error::Verify.log_debug( - "The proof has incorrect number of leaves or proof items." - )); + return Err(primitives::Error::Verify + .log_debug("The proof has incorrect number of leaves or proof items.")) } let mmr: ModuleMmr = mmr::Mmr::new(proof.leaf_count); diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index 53b76ba8000a4328ce8e1cf3eacb5523c2f2821d..d5036e58f432e48884bf7a2188a239deb9f93816 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -16,13 +16,13 @@ // limitations under the License. use crate::{ - Config, HashingOf, mmr::{ - Node, NodeOf, Hasher, - storage::{Storage, OffchainStorage, RuntimeStorage}, + storage::{OffchainStorage, RuntimeStorage, Storage}, utils::NodesUtils, + Hasher, Node, NodeOf, }, primitives::{self, Error}, + Config, HashingOf, }; #[cfg(not(feature = "std"))] use sp_std::vec; @@ -32,45 +32,39 @@ pub fn verify_leaf_proof( root: H::Output, leaf: Node, proof: primitives::Proof, -) -> Result where +) -> Result +where H: sp_runtime::traits::Hash, L: primitives::FullLeaf, { let size = NodesUtils::new(proof.leaf_count).size(); let leaf_position = mmr_lib::leaf_index_to_pos(proof.leaf_index); - let p = mmr_lib::MerkleProof::< - Node, - Hasher, - >::new( + let p = mmr_lib::MerkleProof::, Hasher>::new( size, proof.items.into_iter().map(Node::Hash).collect(), ); - p.verify( - Node::Hash(root), - vec![(leaf_position, leaf)], - ).map_err(|e| Error::Verify.log_debug(e)) + p.verify(Node::Hash(root), vec![(leaf_position, leaf)]) + .map_err(|e| Error::Verify.log_debug(e)) } /// A wrapper around a MMR library to expose limited functionality. /// /// Available functions depend on the storage kind ([Runtime](crate::mmr::storage::RuntimeStorage) /// vs [Off-chain](crate::mmr::storage::OffchainStorage)). 
-pub struct Mmr where +pub struct Mmr +where T: Config, I: 'static, L: primitives::FullLeaf, Storage: mmr_lib::MMRStore>, { - mmr: mmr_lib::MMR< - NodeOf, - Hasher, L>, - Storage - >, + mmr: mmr_lib::MMR, Hasher, L>, Storage>, leaves: u64, } -impl Mmr where +impl Mmr +where T: Config, I: 'static, L: primitives::FullLeaf, @@ -79,10 +73,7 @@ impl Mmr where /// Create a pointer to an existing MMR with given number of leaves. pub fn new(leaves: u64) -> Self { let size = NodesUtils::new(leaves).size(); - Self { - mmr: mmr_lib::MMR::new(size, Default::default()), - leaves, - } + Self { mmr: mmr_lib::MMR::new(size, Default::default()), leaves } } /// Verify proof of a single leaf. @@ -91,19 +82,14 @@ impl Mmr where leaf: L, proof: primitives::Proof<>::Hash>, ) -> Result { - let p = mmr_lib::MerkleProof::< - NodeOf, - Hasher, L>, - >::new( + let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), proof.items.into_iter().map(Node::Hash).collect(), ); let position = mmr_lib::leaf_index_to_pos(proof.leaf_index); let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; - p.verify( - root, - vec![(position, Node::Data(leaf))], - ).map_err(|e| Error::Verify.log_debug(e)) + p.verify(root, vec![(position, Node::Data(leaf))]) + .map_err(|e| Error::Verify.log_debug(e)) } /// Return the internal size of the MMR (number of nodes). @@ -114,19 +100,18 @@ impl Mmr where } /// Runtime specific MMR functions. -impl Mmr where +impl Mmr +where T: Config, I: 'static, L: primitives::FullLeaf, { - /// Push another item to the MMR. /// /// Returns element position (index) in the MMR. pub fn push(&mut self, leaf: L) -> Option { - let position = self.mmr.push(Node::Data(leaf)) - .map_err(|e| Error::Push.log_error(e)) - .ok()?; + let position = + self.mmr.push(Node::Data(leaf)).map_err(|e| Error::Push.log_error(e)).ok()?; self.leaves += 1; @@ -143,7 +128,8 @@ impl Mmr where } /// Off-chain specific MMR functions. -impl Mmr where +impl Mmr +where T: Config, I: 'static, L: primitives::FullLeaf + codec::Decode, @@ -152,10 +138,10 @@ impl Mmr where /// /// Proof generation requires all the nodes (or their hashes) to be available in the storage. /// (i.e. you can't run the function in the pruned storage). - pub fn generate_proof(&self, leaf_index: u64) -> Result< - (L, primitives::Proof<>::Hash>), - Error - > { + pub fn generate_proof( + &self, + leaf_index: u64, + ) -> Result<(L, primitives::Proof<>::Hash>), Error> { let position = mmr_lib::leaf_index_to_pos(leaf_index); let store = >::default(); let leaf = match mmr_lib::MMRStore::get_elem(&store, position) { @@ -163,7 +149,8 @@ impl Mmr where e => return Err(Error::LeafNotFound.log_debug(e)), }; let leaf_count = self.leaves; - self.mmr.gen_proof(vec![position]) + self.mmr + .gen_proof(vec![position]) .map_err(|e| Error::GenerateProof.log_error(e)) .map(|p| primitives::Proof { leaf_index, @@ -173,4 +160,3 @@ impl Mmr where .map(|p| (leaf, p)) } } - diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs index e705b247067e5ef52a56f3a85b24d1ae48076e45..ec2dfe245bd4187252dbaca7aaf689ddd562c1a5 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mod.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mod.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
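One step the hunks above lean on without showing: `Mmr::new` sizes the backing store with `NodesUtils::new(leaves).size()`, and the proof-size check in `lib.rs` bounds proofs by `NodesUtils::new(leaf_count).depth()`. A sketch of the arithmetic; `depth` mirrors the `mmr/utils.rs` hunk further down, while the `size` body is not shown in this diff, so the standard `2n - popcount(n)` MMR identity used here is the editor's assumption.

// One perfect sub-tree ("peak") per set bit of the leaf count.
fn number_of_peaks(leaves: u64) -> u64 {
    u64::from(leaves.count_ones())
}

// Assumed identity: each merge of two peaks adds one parent node, so an
// MMR over `n` leaves stores `2n - popcount(n)` nodes in total.
fn size(leaves: u64) -> u64 {
    2 * leaves - number_of_peaks(leaves)
}

// Matches the reformatted `NodesUtils::depth`: the highest bit position of
// the next power of two, i.e. ceil(log2(n)) + 1, with 0 for an empty MMR.
fn depth(leaves: u64) -> u32 {
    if leaves == 0 {
        return 0
    }
    64 - leaves.next_power_of_two().leading_zeros()
}

fn main() {
    assert_eq!(size(7), 11); // 7 leaves -> 11 nodes (3 of them peaks)
    assert_eq!(depth(7), 4);
}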
+mod mmr; pub mod storage; pub mod utils; -mod mmr; use crate::primitives::FullLeaf; use sp_runtime::traits; -pub use self::mmr::{Mmr, verify_leaf_proof}; +pub use self::mmr::{verify_leaf_proof, Mmr}; /// Node type for runtime `T`. pub type NodeOf = Node<>::Hashing, L>; diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index 65fe19556630c16cf01543e2b03931b0493ca808..09e24017816ecd7913dda13383baec9dd1ec0dd4 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -21,8 +21,10 @@ use codec::Encode; #[cfg(not(feature = "std"))] use sp_std::prelude::Vec; -use crate::mmr::{NodeOf, Node}; -use crate::{NumberOfLeaves, Nodes, Pallet, Config, primitives}; +use crate::{ + mmr::{Node, NodeOf}, + primitives, Config, Nodes, NumberOfLeaves, Pallet, +}; /// A marker type for runtime-specific storage implementation. /// @@ -44,9 +46,7 @@ pub struct OffchainStorage; /// /// There are two different implementations depending on the use case. /// See docs for [RuntimeStorage] and [OffchainStorage]. -pub struct Storage( - sp_std::marker::PhantomData<(StorageType, T, I, L)> -); +pub struct Storage(sp_std::marker::PhantomData<(StorageType, T, I, L)>); impl Default for Storage { fn default() -> Self { @@ -54,7 +54,8 @@ impl Default for Storage { } } -impl mmr_lib::MMRStore> for Storage where +impl mmr_lib::MMRStore> for Storage +where T: Config, I: 'static, L: primitives::FullLeaf + codec::Decode, @@ -62,32 +63,30 @@ impl mmr_lib::MMRStore> for Storage mmr_lib::Result>> { let key = Pallet::::offchain_key(pos); // Retrieve the element from Off-chain DB. - Ok(sp_io::offchain - ::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) + Ok(sp_io::offchain::local_storage_get(sp_core::offchain::StorageKind::PERSISTENT, &key) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } fn append(&mut self, _: u64, _: Vec>) -> mmr_lib::Result<()> { panic!("MMR must not be altered in the off-chain context.") - } + } } -impl mmr_lib::MMRStore> for Storage where +impl mmr_lib::MMRStore> for Storage +where T: Config, I: 'static, L: primitives::FullLeaf, { fn get_elem(&self, pos: u64) -> mmr_lib::Result>> { - Ok(>::get(pos) - .map(Node::Hash) - ) + Ok(>::get(pos).map(Node::Hash)) } fn append(&mut self, pos: u64, elems: Vec>) -> mmr_lib::Result<()> { let mut leaves = crate::NumberOfLeaves::::get(); let mut size = crate::mmr::utils::NodesUtils::new(leaves).size(); if pos != size { - return Err(mmr_lib::Error::InconsistentStore); + return Err(mmr_lib::Error::InconsistentStore) } for elem in elems { diff --git a/substrate/frame/merkle-mountain-range/src/mmr/utils.rs b/substrate/frame/merkle-mountain-range/src/mmr/utils.rs index 34ae6e1a3c78a2968a6e5eab2590cfbfece191f1..4f103fa3b8c0fa51865e823bf7c0b098b7589923 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/utils.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/utils.rs @@ -49,9 +49,7 @@ impl NodesUtils { return 0 } - 64 - self.no_of_leaves - .next_power_of_two() - .leading_zeros() + 64 - self.no_of_leaves.next_power_of_two().leading_zeros() } } @@ -123,9 +121,6 @@ mod tests { actual_sizes.push(mmr.size()); }) } - assert_eq!( - sizes[1..], - actual_sizes[..], - ); + assert_eq!(sizes[1..], actual_sizes[..],); } } diff --git a/substrate/frame/merkle-mountain-range/src/mock.rs b/substrate/frame/merkle-mountain-range/src/mock.rs index 
cfd8212e69847bc7d8c35dff2128e3d3395c406c..4a6b224b051b371312e94246b817c4b2e8d6cdaf 100644 --- a/substrate/frame/merkle-mountain-range/src/mock.rs +++ b/substrate/frame/merkle-mountain-range/src/mock.rs @@ -15,21 +15,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; use crate as pallet_mmr; +use crate::*; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::parameter_types; -use pallet_mmr_primitives::{LeafDataProvider, Compact}; +use pallet_mmr_primitives::{Compact, LeafDataProvider}; use sp_core::H256; use sp_runtime::{ testing::Header, - traits::{ - BlakeTwo256, Keccak256, IdentityLookup, - }, + traits::{BlakeTwo256, IdentityLookup, Keccak256}, }; -use sp_std::cell::RefCell; -use sp_std::prelude::*; +use sp_std::{cell::RefCell, prelude::*}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -92,10 +89,7 @@ pub struct LeafData { impl LeafData { pub fn new(a: u64) -> Self { - Self { - a, - b: Default::default(), - } + Self { a, b: Default::default() } } } diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index 5640468ac93a5358dd47a9557ceb90bf4209531e..50512e92869510656751774f24c975730ba8836f 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -15,18 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::*; -use crate::mock::*; +use crate::{mock::*, *}; use frame_support::traits::OnInitialize; +use pallet_mmr_primitives::{Compact, Proof}; use sp_core::{ + offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, - offchain::{ - testing::TestOffchainExt, - OffchainWorkerExt, OffchainDbExt, - }, }; -use pallet_mmr_primitives::{Proof, Compact}; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { frame_system::GenesisConfig::default().build_storage::().unwrap().into() @@ -58,13 +54,12 @@ pub(crate) fn hex(s: &str) -> H256 { type BlockNumber = ::BlockNumber; -fn decode_node(v: Vec) -> mmr::Node< - ::Hashing, - ((BlockNumber, H256), LeafData), -> { +fn decode_node( + v: Vec, +) -> mmr::Node<::Hashing, ((BlockNumber, H256), LeafData)> { use crate::primitives::DataOrHash; - type A = DataOrHash::<::Hashing, (BlockNumber, H256)>; - type B = DataOrHash::<::Hashing, LeafData>; + type A = DataOrHash<::Hashing, (BlockNumber, H256)>; + type B = DataOrHash<::Hashing, LeafData>; type Node = mmr::Node<::Hashing, (A, B)>; let tuple: Node = codec::Decode::decode(&mut &v[..]).unwrap(); @@ -89,7 +84,9 @@ fn should_start_empty() { // given assert_eq!( crate::RootHash::::get(), - "0000000000000000000000000000000000000000000000000000000000000000".parse().unwrap() + "0000000000000000000000000000000000000000000000000000000000000000" + .parse() + .unwrap() ); assert_eq!(crate::NumberOfLeaves::::get(), 0); assert_eq!(crate::Nodes::::get(0), None); @@ -99,8 +96,10 @@ fn should_start_empty() { // then assert_eq!(crate::NumberOfLeaves::::get(), 1); - assert_eq!(crate::Nodes::::get(0), - Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0"))); + assert_eq!( + crate::Nodes::::get(0), + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")) + ); assert_eq!( crate::RootHash::::get(), hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0") @@ -120,35 +119,41 @@ 
fn should_append_to_mmr_when_on_initialize_is_called() { // then assert_eq!(crate::NumberOfLeaves::::get(), 2); - assert_eq!(( - crate::Nodes::::get(0), - crate::Nodes::::get(1), - crate::Nodes::::get(2), - crate::Nodes::::get(3), - crate::RootHash::::get(), - ), ( - Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), - Some(hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705")), - Some(hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")), - None, - hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), - )); + assert_eq!( + ( + crate::Nodes::::get(0), + crate::Nodes::::get(1), + crate::Nodes::::get(2), + crate::Nodes::::get(3), + crate::RootHash::::get(), + ), + ( + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + Some(hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705")), + Some(hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854")), + None, + hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854"), + ) + ); }); // make sure the leaves end up in the offchain DB ext.persist_offchain_overlay(); let offchain_db = ext.offchain_db(); - assert_eq!(offchain_db.get(&MMR::offchain_key(0)).map(decode_node), Some(mmr::Node::Data(( - (0, H256::repeat_byte(1)), - LeafData::new(1), - )))); - assert_eq!(offchain_db.get(&MMR::offchain_key(1)).map(decode_node), Some(mmr::Node::Data(( - (1, H256::repeat_byte(2)), - LeafData::new(2), - )))); - assert_eq!(offchain_db.get(&MMR::offchain_key(2)).map(decode_node), Some(mmr::Node::Hash( - hex("672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854") - ))); + assert_eq!( + offchain_db.get(&MMR::offchain_key(0)).map(decode_node), + Some(mmr::Node::Data(((0, H256::repeat_byte(1)), LeafData::new(1),))) + ); + assert_eq!( + offchain_db.get(&MMR::offchain_key(1)).map(decode_node), + Some(mmr::Node::Data(((1, H256::repeat_byte(2)), LeafData::new(2),))) + ); + assert_eq!( + offchain_db.get(&MMR::offchain_key(2)).map(decode_node), + Some(mmr::Node::Hash(hex( + "672c04a9cd05a644789d769daa552d35d8de7c33129f8a7cbf49e595234c4854" + ))) + ); assert_eq!(offchain_db.get(&MMR::offchain_key(3)), None); } @@ -161,15 +166,18 @@ fn should_construct_larger_mmr_correctly() { // then assert_eq!(crate::NumberOfLeaves::::get(), 7); - assert_eq!(( - crate::Nodes::::get(0), - crate::Nodes::::get(10), - crate::RootHash::::get(), - ), ( - Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), - Some(hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c")), - hex("e45e25259f7930626431347fa4dd9aae7ac83b4966126d425ca70ab343709d2c"), - )); + assert_eq!( + ( + crate::Nodes::::get(0), + crate::Nodes::::get(10), + crate::RootHash::::get(), + ), + ( + Some(hex("4320435e8c3318562dba60116bdbcc0b82ffcecb9bb39aae3300cfda3ad0b8b0")), + Some(hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c")), + hex("e45e25259f7930626431347fa4dd9aae7ac83b4966126d425ca70ab343709d2c"), + ) + ); }); } @@ -192,41 +200,50 @@ fn should_generate_proofs_correctly() { .collect::>(); // then - assert_eq!(proofs[0], (Compact::new(( - (0, H256::repeat_byte(1)).into(), - LeafData::new(1).into(), - )), Proof { - leaf_index: 0, - leaf_count: 7, - items: vec![ - hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), - hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), - hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), - ], - })); - 
assert_eq!(proofs[4], (Compact::new(( - (4, H256::repeat_byte(5)).into(), - LeafData::new(5).into(), - )), Proof { - leaf_index: 4, - leaf_count: 7, - items: vec![ - hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), - hex("8ed25570209d8f753d02df07c1884ddb36a3d9d4770e4608b188322151c657fe"), - hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c"), - ], - })); - assert_eq!(proofs[6], (Compact::new(( - (6, H256::repeat_byte(7)).into(), - LeafData::new(7).into(), - )), Proof { - leaf_index: 6, - leaf_count: 7, - items: vec![ - hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), - hex("7e4316ae2ebf7c3b6821cb3a46ca8b7a4f9351a9b40fcf014bb0a4fd8e8f29da"), - ], - })); + assert_eq!( + proofs[0], + ( + Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),)), + Proof { + leaf_index: 0, + leaf_count: 7, + items: vec![ + hex("ad4cbc033833612ccd4626d5f023b9dfc50a35e838514dd1f3c86f8506728705"), + hex("cb24f4614ad5b2a5430344c99545b421d9af83c46fd632d70a332200884b4d46"), + hex("dca421199bdcc55bb773c6b6967e8d16675de69062b52285ca63685241fdf626"), + ], + } + ) + ); + assert_eq!( + proofs[4], + ( + Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),)), + Proof { + leaf_index: 4, + leaf_count: 7, + items: vec![ + hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), + hex("8ed25570209d8f753d02df07c1884ddb36a3d9d4770e4608b188322151c657fe"), + hex("611c2174c6164952a66d985cfe1ec1a623794393e3acff96b136d198f37a648c"), + ], + } + ) + ); + assert_eq!( + proofs[6], + ( + Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),)), + Proof { + leaf_index: 6, + leaf_count: 7, + items: vec![ + hex("ae88a0825da50e953e7a359c55fe13c8015e48d03d301b8bdfc9193874da9252"), + hex("7e4316ae2ebf7c3b6821cb3a46ca8b7a4f9351a9b40fcf014bb0a4fd8e8f29da"), + ], + } + ) + ); }); } @@ -280,7 +297,10 @@ fn verification_should_be_stateless() { // Verify proof without relying on any on-chain data. let leaf = crate::primitives::DataOrHash::Data(leaf); - assert_eq!(crate::verify_leaf_proof::<::Hashing, _>(root, leaf, proof5), Ok(())); + assert_eq!( + crate::verify_leaf_proof::<::Hashing, _>(root, leaf, proof5), + Ok(()) + ); } #[test] diff --git a/substrate/frame/metadata/src/lib.rs b/substrate/frame/metadata/src/lib.rs index ba232a88f11c4e749af6b2aed63e562f88bb3c91..7dcf5932df28361b5e6d19c268ef35cba43e5ef0 100644 --- a/substrate/frame/metadata/src/lib.rs +++ b/substrate/frame/metadata/src/lib.rs @@ -24,12 +24,12 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::Serialize; -#[cfg(feature = "std")] -use codec::{Decode, Input, Error}; +use codec::{Decode, Error, Input}; use codec::{Encode, Output}; -use sp_std::vec::Vec; +#[cfg(feature = "std")] +use serde::Serialize; use sp_core::RuntimeDebug; +use sp_std::vec::Vec; #[cfg(feature = "std")] type StringBuf = String; @@ -47,12 +47,20 @@ type StringBuf = &'static str; /// /// For example a `&'static [ &'static str ]` can be decoded to a `Vec`. 
#[derive(Clone)] -pub enum DecodeDifferent<B, O> where B: 'static, O: 'static { +pub enum DecodeDifferent<B, O> +where + B: 'static, + O: 'static, +{ Encode(B), Decoded(O), } -impl<B, O> Encode for DecodeDifferent<B, O> where B: Encode + 'static, O: Encode + 'static { +impl<B, O> Encode for DecodeDifferent<B, O> +where + B: Encode + 'static, + O: Encode + 'static, +{ fn encode_to<W: Output + ?Sized>(&self, dest: &mut W) { match self { DecodeDifferent::Encode(b) => b.encode_to(dest), @@ -61,14 +69,21 @@ impl<B, O> Encode for DecodeDifferent<B, O> where B: Encode + 'static, O: Encode } } -impl<B, O> codec::EncodeLike for DecodeDifferent<B, O> where B: Encode + 'static, O: Encode + 'static {} +impl<B, O> codec::EncodeLike for DecodeDifferent<B, O> +where + B: Encode + 'static, + O: Encode + 'static, +{ +} #[cfg(feature = "std")] -impl<B, O> Decode for DecodeDifferent<B, O> where B: 'static, O: Decode + 'static { +impl<B, O> Decode for DecodeDifferent<B, O> +where + B: 'static, + O: Decode + 'static, +{ fn decode<I: Input>(input: &mut I) -> Result<Self, Error> { - <O>::decode(input).map(|val| { - DecodeDifferent::Decoded(val) - }) + <O>::decode(input).map(|val| DecodeDifferent::Decoded(val)) } } @@ -83,13 +98,16 @@ where } impl<B, O> Eq for DecodeDifferent<B, O> - where B: Encode + Eq + PartialEq + 'static, O: Encode + Eq + PartialEq + 'static -{} +where + B: Encode + Eq + PartialEq + 'static, + O: Encode + Eq + PartialEq + 'static, +{ +} impl<B, O> sp_std::fmt::Debug for DecodeDifferent<B, O> - where - B: sp_std::fmt::Debug + Eq + 'static, - O: sp_std::fmt::Debug + Eq + 'static, +where + B: sp_std::fmt::Debug + Eq + 'static, + O: sp_std::fmt::Debug + Eq + 'static, { fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { match self { @@ -101,11 +119,14 @@ impl<B, O> sp_std::fmt::Debug for DecodeDifferent<B, O> #[cfg(feature = "std")] impl<B, O> serde::Serialize for DecodeDifferent<B, O> - where - B: serde::Serialize + 'static, - O: serde::Serialize + 'static, +where + B: serde::Serialize + 'static, + O: serde::Serialize + 'static, { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { match self { DecodeDifferent::Encode(b) => b.serialize(serializer), DecodeDifferent::Decoded(o) => o.serialize(serializer), @@ -113,7 +134,7 @@ impl<B, O> serde::Serialize for DecodeDifferent<B, O> } } -pub type DecodeDifferentArray<B, O=B> = DecodeDifferent<&'static [B], Vec<O>>; +pub type DecodeDifferentArray<B, O = B> = DecodeDifferent<&'static [B], Vec<O>>; type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; @@ -136,7 +157,9 @@ pub struct FunctionArgumentMetadata { /// Newtype wrapper for support encoding functions (actual the result of the function).
#[derive(Clone, Eq)] -pub struct FnEncode<E>(pub fn() -> E) where E: Encode + 'static; +pub struct FnEncode<E>(pub fn() -> E) +where + E: Encode + 'static; impl<E: Encode> Encode for FnEncode<E> { fn encode_to<W: Output + ?Sized>(&self, dest: &mut W) { @@ -160,7 +183,10 @@ impl<E: Encode + sp_std::fmt::Debug> sp_std::fmt::Debug for FnEncode<E> { #[cfg(feature = "std")] impl<E: Encode + serde::Serialize> serde::Serialize for FnEncode<E> { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { self.0().serialize(serializer) } } @@ -172,7 +198,7 @@ pub struct OuterEventMetadata { pub name: DecodeDifferentStr, pub events: DecodeDifferentArray< (&'static str, FnEncode<&'static [EventMetadata]>), - (StringBuf, Vec<EventMetadata>) + (StringBuf, Vec<EventMetadata>), >, } @@ -253,11 +279,14 @@ impl PartialEq for DefaultByteGetter { } } -impl Eq for DefaultByteGetter { } +impl Eq for DefaultByteGetter {} #[cfg(feature = "std")] impl serde::Serialize for DefaultByteGetter { - fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { self.0.default_byte().serialize(serializer) } } @@ -378,7 +407,7 @@ pub enum RuntimeMetadata { /// Enum that should fail. #[derive(Eq, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize))] -pub enum RuntimeMetadataDeprecated { } +pub enum RuntimeMetadataDeprecated {} impl Encode for RuntimeMetadataDeprecated { fn encode_to<W: Output + ?Sized>(&self, _dest: &mut W) {} diff --git a/substrate/frame/multisig/src/benchmarking.rs b/substrate/frame/multisig/src/benchmarking.rs index 63a178313addd39bc44029d57594a5148b56c030..393e15292e6bceb842bd6131c7d62585f5f938dd 100644 --- a/substrate/frame/multisig/src/benchmarking.rs +++ b/substrate/frame/multisig/src/benchmarking.rs @@ -20,20 +20,18 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use core::convert::TryInto; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use core::convert::TryInto; use crate::Pallet as Multisig; const SEED: u32 = 0; -fn setup_multi<T: Config>(s: u32, z: u32) - -> Result<(Vec<T::AccountId>, Vec<u8>), &'static str> -{ +fn setup_multi<T: Config>(s: u32, z: u32) -> Result<(Vec<T::AccountId>, Vec<u8>), &'static str> { let mut signatories: Vec<T::AccountId> = Vec::new(); - for i in 0 .. s { + for i in 0..s { let signatory = account("signatory", i, SEED); // Give them some balance for a possible deposit let balance = BalanceOf::<T>::max_value(); @@ -298,8 +296,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - Multisig, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/multisig/src/lib.rs b/substrate/frame/multisig/src/lib.rs index bc7ce7029a95bde6a952c84250c61b01a6babff7..6522abd72f073904fb398642558adf93d45a394a 100644 --- a/substrate/frame/multisig/src/lib.rs +++ b/substrate/frame/multisig/src/lib.rs @@ -46,25 +46,33 @@ // Ensure we're `no_std` when compiling for Wasm.
#![cfg_attr(not(feature = "std"), no_std)] -mod tests; mod benchmarking; +mod tests; pub mod weights; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_io::hashing::blake2_256; -use frame_support::{ensure, RuntimeDebug}; -use frame_support::{traits::{Get, ReservableCurrency, Currency}, - weights::{Weight, GetDispatchInfo}, - dispatch::{DispatchResultWithPostInfo, DispatchResult, DispatchErrorWithPostInfo, PostDispatchInfo}, +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::{ + DispatchErrorWithPostInfo, DispatchResult, DispatchResultWithPostInfo, PostDispatchInfo, + }, + ensure, + traits::{Currency, Get, ReservableCurrency}, + weights::{GetDispatchInfo, Weight}, + RuntimeDebug, }; use frame_system::{self as system, RawOrigin}; -use sp_runtime::{DispatchError, traits::{Dispatchable, Zero}}; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + traits::{Dispatchable, Zero}, + DispatchError, +}; +use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; -type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; +type BalanceOf<T> = + <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; /// Just a bunch of bytes, but they should decode to a valid `Call`. pub type OpaqueCall = Vec<u8>; @@ -100,10 +108,10 @@ enum CallOrHash { } #[frame_support::pallet] -pub mod pallet{ +pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { @@ -111,8 +119,10 @@ pub mod pallet{ type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; /// The overarching call type. - type Call: Parameter + Dispatchable<Origin = Self::Origin, PostInfo = PostDispatchInfo> - + GetDispatchInfo + From<frame_system::Call<Self>>; + type Call: Parameter + + Dispatchable<Origin = Self::Origin, PostInfo = PostDispatchInfo> + + GetDispatchInfo + + From<frame_system::Call<Self>>; /// The currency mechanism. type Currency: ReservableCurrency<Self::AccountId>; @@ -156,12 +166,8 @@ pub mod pallet{ >; #[pallet::storage] - pub type Calls<T: Config> = StorageMap< - _, - Identity, - [u8; 32], - (OpaqueCall, T::AccountId, BalanceOf<T>), - >; + pub type Calls<T: Config> = + StorageMap<_, Identity, [u8; 32], (OpaqueCall, T::AccountId, BalanceOf<T>)>; #[pallet::error] pub enum Error<T> { @@ -209,9 +215,15 @@ pub mod pallet{ /// \[approving, timepoint, multisig, call_hash\] MultisigApproval(T::AccountId, Timepoint<T::BlockNumber>, T::AccountId, CallHash), /// A multisig operation has been executed. \[approving, timepoint, multisig, call_hash\] - MultisigExecuted(T::AccountId, Timepoint<T::BlockNumber>, T::AccountId, CallHash, DispatchResult), + MultisigExecuted( + T::AccountId, + Timepoint<T::BlockNumber>, + T::AccountId, + CallHash, + DispatchResult, + ), /// A multisig operation has been cancelled.
\[cancelling, timepoint, multisig, call_hash\] - MultisigCancelled(T::AccountId, Timepoint<T::BlockNumber>, T::AccountId, CallHash) + MultisigCancelled(T::AccountId, Timepoint<T::BlockNumber>, T::AccountId, CallHash), } #[pallet::hooks] @@ -262,21 +274,26 @@ pub mod pallet{ let call_len = call.using_encoded(|c| c.len()); let result = call.dispatch(RawOrigin::Signed(id).into()); - result.map(|post_dispatch_info| post_dispatch_info.actual_weight - .map(|actual_weight| - T::WeightInfo::as_multi_threshold_1(call_len as u32) - .saturating_add(actual_weight) - ).into() - ).map_err(|err| match err.post_info.actual_weight { - Some(actual_weight) => { - let weight_used = T::WeightInfo::as_multi_threshold_1(call_len as u32) - .saturating_add(actual_weight); - let post_info = Some(weight_used).into(); - let error = err.error.into(); - DispatchErrorWithPostInfo { post_info, error } - }, - None => err, - }) + result + .map(|post_dispatch_info| { + post_dispatch_info + .actual_weight + .map(|actual_weight| { + T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight) + }) + .into() + }) + .map_err(|err| match err.post_info.actual_weight { + Some(actual_weight) => { + let weight_used = T::WeightInfo::as_multi_threshold_1(call_len as u32) + .saturating_add(actual_weight); + let post_info = Some(weight_used).into(); + let error = err.error.into(); + DispatchErrorWithPostInfo { post_info, error } + }, + None => err, + }) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -345,7 +362,14 @@ pub mod pallet{ max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Call(call, store_call), max_weight) + Self::operate( + who, + threshold, + other_signatories, + maybe_timepoint, + CallOrHash::Call(call, store_call), + max_weight, + ) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -401,7 +425,14 @@ pub mod pallet{ max_weight: Weight, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Self::operate(who, threshold, other_signatories, maybe_timepoint, CallOrHash::Hash(call_hash), max_weight) + Self::operate( + who, + threshold, + other_signatories, + maybe_timepoint, + CallOrHash::Hash(call_hash), + max_weight, + ) } /// Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously @@ -447,8 +478,7 @@ pub mod pallet{ let id = Self::multi_account_id(&signatories, threshold); - let m = <Multisigs<T>>::get(&id, call_hash) - .ok_or(Error::<T>::NotFound)?; + let m = <Multisigs<T>>::get(&id, call_hash).ok_or(Error::<T>::NotFound)?; ensure!(m.when == timepoint, Error::<T>::WrongTimepoint); ensure!(m.depositor == who, Error::<T>::NotOwner); @@ -496,7 +526,7 @@ impl<T: Config> Pallet<T> { let call_hash = blake2_256(&call); let call_len = call.len(); (call_hash, call_len, Some(call), should_store) - } + }, CallOrHash::Hash(h) => (h, 0, None, false), }; @@ -511,12 +541,16 @@ impl<T: Config> Pallet<T> { // We only bother with the approval if we're below threshold. let maybe_pos = m.approvals.binary_search(&who).err().filter(|_| approvals < threshold); // Bump approvals if not yet voted and the vote is needed. - if maybe_pos.is_some() { approvals += 1; } + if maybe_pos.is_some() { + approvals += 1; + } // We only bother fetching/decoding call if we know that we're ready to execute.
let maybe_approved_call = if approvals >= threshold { Self::get_call(&call_hash, maybe_call.as_ref().map(|c| c.as_ref())) - } else { None }; + } else { + None + }; if let Some((call, call_len)) = maybe_approved_call { // verify weight @@ -530,21 +564,33 @@ impl<T: Config> Pallet<T> { let result = call.dispatch(RawOrigin::Signed(id.clone()).into()); Self::deposit_event(Event::MultisigExecuted( - who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) + who, + timepoint, + id, + call_hash, + result.map(|_| ()).map_err(|e| e.error), )); - Ok(get_result_weight(result).map(|actual_weight| - T::WeightInfo::as_multi_complete( - other_signatories_len as u32, - call_len as u32 - ).saturating_add(actual_weight) - ).into()) + Ok(get_result_weight(result) + .map(|actual_weight| { + T::WeightInfo::as_multi_complete( + other_signatories_len as u32, + call_len as u32, + ) + .saturating_add(actual_weight) + }) + .into()) } else { // We cannot dispatch the call now; either it isn't available, or it is, but we // don't have threshold approvals even with our signature. // Store the call if desired. let stored = if let Some(data) = maybe_call.filter(|_| store) { - Self::store_call_and_reserve(who.clone(), &call_hash, data, BalanceOf::<T>::zero())?; + Self::store_call_and_reserve( + who.clone(), + &call_hash, + data, + BalanceOf::<T>::zero(), + )?; true } else { false @@ -567,10 +613,7 @@ impl<T: Config> Pallet<T> { call_len as u32, ) } else { - T::WeightInfo::as_multi_approve( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_approve(other_signatories_len as u32, call_len as u32) }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) @@ -591,24 +634,22 @@ impl<T: Config> Pallet<T> { false }; - <Multisigs<T>>::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); + <Multisigs<T>>::insert( + &id, + call_hash, + Multisig { + when: Self::timepoint(), + deposit, + depositor: who.clone(), + approvals: vec![who.clone()], + }, + ); Self::deposit_event(Event::NewMultisig(who, id, call_hash)); let final_weight = if stored { - T::WeightInfo::as_multi_create_store( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_create_store(other_signatories_len as u32, call_len as u32) } else { - T::WeightInfo::as_multi_create( - other_signatories_len as u32, - call_len as u32, - ) + T::WeightInfo::as_multi_create(other_signatories_len as u32, call_len as u32) }; // Call is not made, so the actual weight does not include call Ok(Some(final_weight).into()) @@ -627,22 +668,27 @@ impl<T: Config> Pallet<T> { other_deposit: BalanceOf<T>, ) -> DispatchResult { ensure!(!Calls::<T>::contains_key(hash), Error::<T>::AlreadyStored); - let deposit = other_deposit + T::DepositBase::get() - + T::DepositFactor::get() * BalanceOf::<T>::from(((data.len() + 31) / 32) as u32); + let deposit = other_deposit + + T::DepositBase::get() + + T::DepositFactor::get() * BalanceOf::<T>::from(((data.len() + 31) / 32) as u32); T::Currency::reserve(&who, deposit)?; Calls::<T>::insert(&hash, (data, who, deposit)); Ok(()) } /// Attempt to decode and return the call, provided by the user or from storage.
- fn get_call(hash: &[u8; 32], maybe_known: Option<&[u8]>) -> Option<(<T as Config>::Call, usize)> { - maybe_known.map_or_else(|| { - Calls::<T>::get(hash).and_then(|(data, ..)| { - Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) - }) - }, |data| { - Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) - }) + fn get_call( + hash: &[u8; 32], + maybe_known: Option<&[u8]>, + ) -> Option<(<T as Config>::Call, usize)> { + maybe_known.map_or_else( + || { + Calls::<T>::get(hash).and_then(|(data, ..)| { + Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())) + }) + }, + |data| Decode::decode(&mut &data[..]).ok().map(|d| (d, data.len())), + ) } /// Attempt to remove a call from storage, returning any deposit on it to the owner. @@ -661,9 +707,10 @@ impl<T: Config> Pallet<T> { } /// Check that signatories is sorted and doesn't contain sender, then insert sender. - fn ensure_sorted_and_insert(other_signatories: Vec<T::AccountId>, who: T::AccountId) - -> Result<Vec<T::AccountId>, DispatchError> - { + fn ensure_sorted_and_insert( + other_signatories: Vec<T::AccountId>, + who: T::AccountId, + ) -> Result<Vec<T::AccountId>, DispatchError> { let mut signatories = other_signatories; let mut maybe_last = None; let mut index = 0; diff --git a/substrate/frame/multisig/src/tests.rs b/substrate/frame/multisig/src/tests.rs index 69f7cb17b0f5a5e94651c03d9b3d745db91356e0..6dba6f7d4ab5a61751cd6d5a3e3407bf30a2afd9 100644 --- a/substrate/frame/multisig/src/tests.rs +++ b/substrate/frame/multisig/src/tests.rs @@ -21,12 +21,13 @@ use super::*; -use frame_support::{ - assert_ok, assert_noop, parameter_types, traits::Filter, -}; -use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use crate as pallet_multisig; +use frame_support::{assert_noop, assert_ok, parameter_types, traits::Filter}; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; type Block = frame_system::mocking::MockBlock<Test>; @@ -113,14 +114,15 @@ impl Config for Test { type WeightInfo = (); } -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; +use pallet_balances::{Call as BalancesCall, Error as BalancesError}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap(); pallet_balances::GenesisConfig::<Test> { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext @@ -141,11 +143,27 @@ fn multisig_deposit_is_taken_and_returned() { let call = Call::Balances(BalancesCall::transfer(6, 15)); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(1), 2); assert_eq!(Balances::reserved_balance(1), 3); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -167,7 +185,14 @@ fn multisig_deposit_is_taken_and_returned_with_call_storage() {
assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 5); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -186,17 +211,39 @@ fn multisig_deposit_is_taken_and_returned_with_alt_call_storage() { let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), data, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + data, + true, + 0 + )); assert_eq!(Balances::free_balance(2), 3); assert_eq!(Balances::reserved_balance(2), 2); assert_eq!(Balances::free_balance(1), 1); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::free_balance(2), 5); @@ -209,13 +256,31 @@ fn cancel_multisig_returns_deposit() { new_test_ext().execute_with(|| { let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 6); assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); assert_eq!(Balances::free_balance(1), 10); assert_eq!(Balances::reserved_balance(1), 0); }); @@ -233,7 +298,14 @@ fn timepoint_checking_works() { let hash = blake2_256(&call); assert_noop!( - Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + ), Error::::UnexpectedTimepoint, ); @@ -243,9 +315,17 @@ fn timepoint_checking_works() { Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone(), false, 0), Error::::NoTimepoint, ); - let later = Timepoint { index: 1, .. 
now() }; + let later = Timepoint { index: 1, ..now() }; assert_noop!( - Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone(), false, 0), + Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(later), + call.clone(), + false, + 0 + ), Error::::WrongTimepoint, ); }); @@ -266,7 +346,14 @@ fn multisig_2_of_3_works_with_call_storing() { assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data, true, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash, call_weight)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -286,7 +373,15 @@ fn multisig_2_of_3_works() { assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash, 0)); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -303,11 +398,33 @@ fn multisig_3_of_3_works() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -317,15 +434,33 @@ fn cancel_multisig_works() { new_test_ext().execute_with(|| { let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, ); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); }); } @@ -336,14 +471,25 @@ fn cancel_multisig_with_call_storage_works() { let hash = blake2_256(&call); assert_ok!(Multisig::as_multi(Origin::signed(1), 3, vec![2, 3], None, call, true, 0)); assert_eq!(Balances::free_balance(1), 4); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + 
Some(now()), + hash.clone(), + 0 + )); assert_noop!( Multisig::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), Error::::NotOwner, ); - assert_ok!( - Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); + assert_ok!(Multisig::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); assert_eq!(Balances::free_balance(1), 10); }); } @@ -353,9 +499,24 @@ fn cancel_multisig_with_alt_call_storage_works() { new_test_ext().execute_with(|| { let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(1), 6); - assert_ok!(Multisig::as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), call, true, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + call, + true, + 0 + )); assert_eq!(Balances::free_balance(2), 8); assert_ok!(Multisig::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash)); assert_eq!(Balances::free_balance(1), 10); @@ -374,10 +535,26 @@ fn multisig_2_of_3_as_multi_works() { let call = Call::Balances(BalancesCall::transfer(6, 15)); let call_weight = call.get_dispatch_info().weight; let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } @@ -397,10 +574,42 @@ fn multisig_2_of_3_as_multi_with_many_calls_works() { let call2_weight = call2.get_dispatch_info().weight; let data2 = call2.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data1.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], None, data2.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data1, false, call1_weight)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data2, false, call2_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data1.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + None, + data2.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data1, + false, + call1_weight + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data2, + false, + call2_weight + )); assert_eq!(Balances::free_balance(6), 10); assert_eq!(Balances::free_balance(7), 5); @@ -419,15 +628,49 @@ fn multisig_2_of_3_cannot_reissue_same_call() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data.clone(), false, call_weight)); + 
assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + data.clone(), + false, + call_weight + )); assert_eq!(Balances::free_balance(multi), 5); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); - assert_ok!(Multisig::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), data.clone(), false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + data.clone(), + false, + call_weight + )); let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); - System::assert_last_event(pallet_multisig::Event::MultisigExecuted(3, now(), multi, hash, Err(err)).into()); + System::assert_last_event( + pallet_multisig::Event::MultisigExecuted(3, now(), multi, hash, Err(err)).into(), + ); }); } @@ -462,14 +705,42 @@ fn duplicate_approvals_are_ignored() { new_test_ext().execute_with(|| { let call = Call::Balances(BalancesCall::transfer(6, 15)).encode(); let hash = blake2_256(&call); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash.clone(), + 0 + )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + Some(now()), + hash.clone(), + 0 + ), Error::::AlreadyApproved, ); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); assert_noop!( - Multisig::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone(), 0), + Multisig::approve_as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + hash.clone(), + 0 + ), Error::::AlreadyApproved, ); }); @@ -521,7 +792,15 @@ fn weight_check_works() { let call = Call::Balances(BalancesCall::transfer(6, 15)); let data = call.encode(); - assert_ok!(Multisig::as_multi(Origin::signed(1), 2, vec![2, 3], None, data.clone(), false, 0)); + assert_ok!(Multisig::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + data.clone(), + false, + 0 + )); assert_eq!(Balances::free_balance(6), 0); assert_noop!( @@ -545,12 +824,41 @@ fn multisig_handles_no_preimage_after_all_approve() { let call_weight = call.get_dispatch_info().weight; let data = call.encode(); let hash = blake2_256(&data); - assert_ok!(Multisig::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone(), 0)); - assert_ok!(Multisig::approve_as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), hash.clone(), 0)); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone(), + 0 + )); + assert_ok!(Multisig::approve_as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + hash.clone(), + 0 + )); assert_eq!(Balances::free_balance(6), 0); - assert_ok!(Multisig::as_multi(Origin::signed(3), 3, 
vec![1, 2], Some(now()), data, false, call_weight)); + assert_ok!(Multisig::as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + data, + false, + call_weight + )); assert_eq!(Balances::free_balance(6), 15); }); } diff --git a/substrate/frame/multisig/src/weights.rs b/substrate/frame/multisig/src/weights.rs index 50f774030015faa1899ab774352f899ebd86ff91..ce111911bbd270dbf210c077fdd1b57a680b5c87 100644 --- a/substrate/frame/multisig/src/weights.rs +++ b/substrate/frame/multisig/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/nicks/src/lib.rs b/substrate/frame/nicks/src/lib.rs index afdcca7e91a5c5d66a7f910fb330a65eab421efd..d78f1c4465651e49bb882a8e706a3615de82ccc7 100644 --- a/substrate/frame/nicks/src/lib.rs +++ b/substrate/frame/nicks/src/lib.rs @@ -41,21 +41,26 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - traits::{StaticLookup, Zero} -}; -use frame_support::traits::{Currency, ReservableCurrency, OnUnbalanced}; +use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency}; pub use pallet::*; +use sp_runtime::traits::{StaticLookup, Zero}; +use sp_std::prelude::*; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; #[frame_support::pallet] pub mod pallet { - use frame_system::{ensure_signed, pallet_prelude::*}; - use frame_support::{ensure, pallet_prelude::*, traits::{EnsureOrigin, Get}}; use super::*; + use frame_support::{ + ensure, + pallet_prelude::*, + traits::{EnsureOrigin, Get}, + }; + use frame_system::{ensure_signed, pallet_prelude::*}; #[pallet::config] pub trait Config: frame_system::Config { @@ -113,7 +118,8 @@ pub mod pallet { /// The lookup table for names. 
#[pallet::storage] - pub(super) type NameOf<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, (Vec<u8>, BalanceOf<T>)>; + pub(super) type NameOf<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, (Vec<u8>, BalanceOf<T>)>; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -197,7 +203,7 @@ pub mod pallet { #[pallet::weight(70_000_000)] pub fn kill_name( origin: OriginFor<T>, - target: <T::Lookup as StaticLookup>::Source + target: <T::Lookup as StaticLookup>::Source, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -228,7 +234,7 @@ pub mod pallet { pub fn force_name( origin: OriginFor<T>, target: <T::Lookup as StaticLookup>::Source, - name: Vec<u8> + name: Vec<u8>, ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; @@ -247,11 +253,12 @@ mod tests { use super::*; use crate as pallet_nicks; - use frame_support::{assert_ok, assert_noop, parameter_types, ord_parameter_types}; - use sp_core::H256; + use frame_support::{assert_noop, assert_ok, ord_parameter_types, parameter_types}; use frame_system::EnsureSignedBy; + use sp_core::H256; use sp_runtime::{ - testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; @@ -333,12 +340,9 @@ mod tests { fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap(); - pallet_balances::GenesisConfig::<Test> { - balances: vec![ - (1, 10), - (2, 10), - ], - }.assimilate_storage(&mut t).unwrap(); + pallet_balances::GenesisConfig::<Test> { balances: vec![(1, 10), (2, 10)] } + .assimilate_storage(&mut t) + .unwrap(); t.into() } @@ -398,7 +402,10 @@ mod tests { pallet_balances::Error::<Test, _>::InsufficientBalance ); - assert_noop!(Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), Error::<Test>::TooShort); + assert_noop!( + Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), + Error::<Test>::TooShort + ); assert_noop!( Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), Error::<Test>::TooLong diff --git a/substrate/frame/node-authorization/src/lib.rs b/substrate/frame/node-authorization/src/lib.rs index 5f233549c73cac5c34e81f28a0cf22fbc4c8217e..5551ec2ad2c40da704ccd9429db47fd9b725ef28 100644 --- a/substrate/frame/node-authorization/src/lib.rs +++ b/substrate/frame/node-authorization/src/lib.rs @@ -44,22 +44,15 @@ mod tests; pub mod weights; -use sp_core::OpaquePeerId as PeerId; -use sp_std::{ - collections::btree_set::BTreeSet, - iter::FromIterator, - prelude::*, -}; pub use pallet::*; +use sp_core::OpaquePeerId as PeerId; +use sp_std::{collections::btree_set::BTreeSet, iter::FromIterator, prelude::*}; pub use weights::WeightInfo; #[frame_support::pallet] pub mod pallet { use super::*; - use frame_support::{ - dispatch::DispatchResult, - pallet_prelude::*, - }; + use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; #[pallet::pallet] @@ -104,23 +97,13 @@ pub mod pallet { /// A map that maintains the ownership of each node. #[pallet::storage] #[pallet::getter(fn owners)] - pub type Owners<T: Config> = StorageMap< - _, - Blake2_128Concat, - PeerId, - T::AccountId, - >; + pub type Owners<T: Config> = StorageMap<_, Blake2_128Concat, PeerId, T::AccountId>; /// The additional adapative connections of each node.
#[pallet::storage] #[pallet::getter(fn additional_connection)] - pub type AdditionalConnections = StorageMap< - _, - Blake2_128Concat, - PeerId, - BTreeSet, - ValueQuery, - >; + pub type AdditionalConnections = + StorageMap<_, Blake2_128Concat, PeerId, BTreeSet, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { @@ -208,10 +191,10 @@ pub mod pallet { ), Ok(node) => sp_io::offchain::set_authorized_nodes( Self::get_authorized_nodes(&PeerId(node)), - true - ) + true, + ), } - } + }, } } } @@ -228,7 +211,7 @@ pub mod pallet { pub fn add_well_known_node( origin: OriginFor, node: PeerId, - owner: T::AccountId + owner: T::AccountId, ) -> DispatchResult { T::AddOrigin::ensure_origin(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); @@ -281,16 +264,15 @@ pub mod pallet { pub fn swap_well_known_node( origin: OriginFor, remove: PeerId, - add: PeerId + add: PeerId, ) -> DispatchResult { T::SwapOrigin::ensure_origin(origin)?; - ensure!( - remove.0.len() < T::MaxPeerIdLength::get() as usize, - Error::::PeerIdTooLong - ); + ensure!(remove.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); ensure!(add.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - if remove == add { return Ok(()) } + if remove == add { + return Ok(()) + } let mut nodes = WellKnownNodes::::get(); ensure!(nodes.contains(&remove), Error::::NotExist); @@ -317,7 +299,7 @@ pub mod pallet { #[pallet::weight((T::WeightInfo::reset_well_known_nodes(), DispatchClass::Operational))] pub fn reset_well_known_nodes( origin: OriginFor, - nodes: Vec<(PeerId, T::AccountId)> + nodes: Vec<(PeerId, T::AccountId)>, ) -> DispatchResult { T::ResetOrigin::ensure_origin(origin)?; ensure!(nodes.len() < T::MaxWellKnownNodes::get() as usize, Error::::TooManyNodes); @@ -337,7 +319,7 @@ pub mod pallet { let sender = ensure_signed(origin)?; ensure!(node.0.len() < T::MaxPeerIdLength::get() as usize, Error::::PeerIdTooLong); - ensure!(!Owners::::contains_key(&node),Error::::AlreadyClaimed); + ensure!(!Owners::::contains_key(&node), Error::::AlreadyClaimed); Owners::::insert(&node, &sender); Self::deposit_event(Event::NodeClaimed(node, sender)); @@ -373,7 +355,7 @@ pub mod pallet { pub fn transfer_node( origin: OriginFor, node: PeerId, - owner: T::AccountId + owner: T::AccountId, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -395,7 +377,7 @@ pub mod pallet { pub fn add_connections( origin: OriginFor, node: PeerId, - connections: Vec + connections: Vec, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -407,7 +389,7 @@ pub mod pallet { for add_node in connections.iter() { if *add_node == node { - continue; + continue } nodes.insert(add_node.clone()); } @@ -426,7 +408,7 @@ pub mod pallet { pub fn remove_connections( origin: OriginFor, node: PeerId, - connections: Vec + connections: Vec, ) -> DispatchResult { let sender = ensure_signed(origin)?; @@ -450,9 +432,7 @@ pub mod pallet { impl Pallet { fn initialize_nodes(nodes: &Vec<(PeerId, T::AccountId)>) { - let peer_ids = nodes.iter() - .map(|item| item.0.clone()) - .collect::>(); + let peer_ids = nodes.iter().map(|item| item.0.clone()).collect::>(); WellKnownNodes::::put(&peer_ids); for (node, who) in nodes.iter() { diff --git a/substrate/frame/node-authorization/src/mock.rs b/substrate/frame/node-authorization/src/mock.rs index e952ed900d4be1a3add70bc4dbc305ae45064016..302378f48ce64105d02c190550212ca6d02c07e0 100644 --- a/substrate/frame/node-authorization/src/mock.rs +++ 
b/substrate/frame/node-authorization/src/mock.rs @@ -20,13 +20,13 @@ use super::*; use crate as pallet_node_authorization; -use frame_support::{ - parameter_types, ord_parameter_types, - traits::GenesisBuild, -}; +use frame_support::{ord_parameter_types, parameter_types, traits::GenesisBuild}; use frame_system::EnsureSignedBy; use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -102,6 +102,8 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_node_authorization::GenesisConfig:: { nodes: vec![(test_node(10), 10), (test_node(20), 20), (test_node(30), 30)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/substrate/frame/node-authorization/src/tests.rs b/substrate/frame/node-authorization/src/tests.rs index 15a286fbc2390dcdd6270da7ded1b3d27cc2d45c..530904fa734880ac983a498e41fdf5094f8600b4 100644 --- a/substrate/frame/node-authorization/src/tests.rs +++ b/substrate/frame/node-authorization/src/tests.rs @@ -19,7 +19,7 @@ use super::*; use crate::mock::*; -use frame_support::{assert_ok, assert_noop}; +use frame_support::{assert_noop, assert_ok}; use sp_runtime::traits::BadOrigin; #[test] @@ -38,9 +38,7 @@ fn add_well_known_node_works() { Error::::AlreadyJoined ); - assert_ok!( - NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15) - ); + assert_ok!(NodeAuthorization::add_well_known_node(Origin::signed(1), test_node(15), 15)); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(15), test_node(20), test_node(30)]) @@ -75,13 +73,11 @@ fn remove_well_known_node_works() { AdditionalConnections::::insert( test_node(20), - BTreeSet::from_iter(vec![test_node(40)]) + BTreeSet::from_iter(vec![test_node(40)]), ); assert!(AdditionalConnections::::contains_key(test_node(20))); - assert_ok!( - NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20)) - ); + assert_ok!(NodeAuthorization::remove_well_known_node(Origin::signed(2), test_node(20))); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(30)]) @@ -95,56 +91,58 @@ fn remove_well_known_node_works() { fn swap_well_known_node_works() { new_test_ext().execute_with(|| { assert_noop!( - NodeAuthorization::swap_well_known_node( - Origin::signed(4), test_node(20), test_node(5) - ), + NodeAuthorization::swap_well_known_node(Origin::signed(4), test_node(20), test_node(5)), BadOrigin ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), PeerId(vec![1, 2, 3]), test_node(20) + Origin::signed(3), + PeerId(vec![1, 2, 3]), + test_node(20) ), Error::::PeerIdTooLong ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), PeerId(vec![1, 2, 3]) + Origin::signed(3), + test_node(20), + PeerId(vec![1, 2, 3]) ), Error::::PeerIdTooLong ); - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(20) - ) - ); + assert_ok!(NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + test_node(20) + )); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(10), test_node(20), test_node(30)]) ); assert_noop!( - 
NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(15), test_node(5) - ), + NodeAuthorization::swap_well_known_node(Origin::signed(3), test_node(15), test_node(5)), Error::::NotExist ); assert_noop!( NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(30) + Origin::signed(3), + test_node(20), + test_node(30) ), Error::::AlreadyJoined ); AdditionalConnections::::insert( test_node(20), - BTreeSet::from_iter(vec![test_node(15)]) - ); - assert_ok!( - NodeAuthorization::swap_well_known_node( - Origin::signed(3), test_node(20), test_node(5) - ) + BTreeSet::from_iter(vec![test_node(15)]), ); + assert_ok!(NodeAuthorization::swap_well_known_node( + Origin::signed(3), + test_node(20), + test_node(5) + )); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(5), test_node(10), test_node(30)]) @@ -182,12 +180,10 @@ fn reset_well_known_nodes_works() { Error::::TooManyNodes ); - assert_ok!( - NodeAuthorization::reset_well_known_nodes( - Origin::signed(4), - vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] - ) - ); + assert_ok!(NodeAuthorization::reset_well_known_nodes( + Origin::signed(4), + vec![(test_node(15), 15), (test_node(5), 5), (test_node(20), 20)] + )); assert_eq!( WellKnownNodes::::get(), BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(20)]) @@ -240,7 +236,7 @@ fn remove_claim_works() { Owners::::insert(test_node(15), 15); AdditionalConnections::::insert( test_node(15), - BTreeSet::from_iter(vec![test_node(20)]) + BTreeSet::from_iter(vec![test_node(20)]), ); assert_ok!(NodeAuthorization::remove_claim(Origin::signed(15), test_node(15))); assert!(!Owners::::contains_key(test_node(15))); @@ -275,31 +271,35 @@ fn add_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + Origin::signed(15), + PeerId(vec![1, 2, 3]), + vec![test_node(5)] ), Error::::PeerIdTooLong ); assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] + Origin::signed(15), + test_node(15), + vec![test_node(5)] ), Error::::NotClaimed ); assert_noop!( NodeAuthorization::add_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] + Origin::signed(15), + test_node(20), + vec![test_node(5)] ), Error::::NotOwner ); - assert_ok!( - NodeAuthorization::add_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5), test_node(25), test_node(20)] - ) - ); + assert_ok!(NodeAuthorization::add_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5), test_node(25), test_node(20)] + )); assert_eq!( AdditionalConnections::::get(test_node(20)), BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) @@ -312,35 +312,39 @@ fn remove_connections_works() { new_test_ext().execute_with(|| { assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), PeerId(vec![1, 2, 3]), vec![test_node(5)] + Origin::signed(15), + PeerId(vec![1, 2, 3]), + vec![test_node(5)] ), Error::::PeerIdTooLong ); assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), test_node(15), vec![test_node(5)] + Origin::signed(15), + test_node(15), + vec![test_node(5)] ), Error::::NotClaimed ); assert_noop!( NodeAuthorization::remove_connections( - Origin::signed(15), test_node(20), vec![test_node(5)] + Origin::signed(15), + test_node(20), + vec![test_node(5)] ), Error::::NotOwner ); 
AdditionalConnections::::insert( test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) - ); - assert_ok!( - NodeAuthorization::remove_connections( - Origin::signed(20), - test_node(20), - vec![test_node(15), test_node(5)] - ) + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), ); + assert_ok!(NodeAuthorization::remove_connections( + Origin::signed(20), + test_node(20), + vec![test_node(15), test_node(5)] + )); assert_eq!( AdditionalConnections::::get(test_node(20)), BTreeSet::from_iter(vec![test_node(25)]) @@ -353,7 +357,7 @@ fn get_authorized_nodes_works() { new_test_ext().execute_with(|| { AdditionalConnections::::insert( test_node(20), - BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]) + BTreeSet::from_iter(vec![test_node(5), test_node(15), test_node(25)]), ); let mut authorized_nodes = Pallet::::get_authorized_nodes(&test_node(20)); diff --git a/substrate/frame/node-authorization/src/weights.rs b/substrate/frame/node-authorization/src/weights.rs index 3d01e40d67ac38977b84e23ec5ecc7b7dca65830..dbb7956cff967474471bf01b99e19a6df82baff6 100644 --- a/substrate/frame/node-authorization/src/weights.rs +++ b/substrate/frame/node-authorization/src/weights.rs @@ -17,6 +17,7 @@ //! Autogenerated weights for pallet_node_authorization +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/offences/benchmarking/src/lib.rs b/substrate/frame/offences/benchmarking/src/lib.rs index d424cfc751eefea3015af75a0183fe275a3bb1e4..0332272cf2df53b96be2ab5e877ad9f9f8df2a21 100644 --- a/substrate/frame/offences/benchmarking/src/lib.rs +++ b/substrate/frame/offences/benchmarking/src/lib.rs @@ -21,29 +21,30 @@ mod mock; -use sp_std::prelude::*; -use sp_std::vec; +use sp_std::{prelude::*, vec}; -use frame_system::{RawOrigin, Pallet as System, Config as SystemConfig}; -use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; use frame_support::traits::{Currency, ValidatorSet, ValidatorSetWithIdentification}; +use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; use sp_runtime::{ + traits::{Convert, Saturating, StaticLookup, UniqueSaturatedInto}, Perbill, - traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}, }; -use sp_staking::offence::{ReportOffence, Offence}; +use sp_staking::offence::{Offence, ReportOffence}; -use pallet_balances::Config as BalancesConfig; use pallet_babe::BabeEquivocationOffence; +use pallet_balances::Config as BalancesConfig; use pallet_grandpa::{GrandpaEquivocationOffence, GrandpaTimeSlot}; use pallet_im_online::{Config as ImOnlineConfig, Pallet as ImOnline, UnresponsivenessOffence}; use pallet_offences::{Config as OffencesConfig, Pallet as Offences}; -use pallet_session::historical::{Config as HistoricalConfig, IdentificationTuple}; -use pallet_session::{Config as SessionConfig, SessionManager}; +use pallet_session::{ + historical::{Config as HistoricalConfig, IdentificationTuple}, + Config as SessionConfig, SessionManager, +}; use pallet_staking::{ - Pallet as Staking, Config as StakingConfig, RewardDestination, ValidatorPrefs, Exposure, - IndividualExposure, Event as StakingEvent, + Config as StakingConfig, Event as StakingEvent, Exposure, IndividualExposure, + Pallet as Staking, RewardDestination, ValidatorPrefs, }; const SEED: u32 = 0; @@ -62,7 +63,8 @@ pub trait Config: + HistoricalConfig + BalancesConfig + IdTupleConvert -{} +{ +} 
/// A helper trait to make sure we can convert `IdentificationTuple` coming from historical /// and the one required by offences. @@ -71,8 +73,9 @@ pub trait IdTupleConvert { fn convert(id: IdentificationTuple) -> ::IdentificationTuple; } -impl IdTupleConvert for T where - ::IdentificationTuple: From> +impl IdTupleConvert for T +where + ::IdentificationTuple: From>, { fn convert(id: IdentificationTuple) -> ::IdentificationTuple { id.into() @@ -80,7 +83,8 @@ impl IdTupleConvert for T where } type LookupSourceOf = <::Lookup as StaticLookup>::Source; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; struct Offender { pub controller: T::AccountId, @@ -109,19 +113,20 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' reward_destination.clone(), )?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - .. Default::default() - }; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller.clone()).into(), validator_prefs)?; let mut individual_exposures = vec![]; let mut nominator_stashes = vec![]; // Create n nominators - for i in 0 .. nominators { - let nominator_stash: T::AccountId = account("nominator stash", n * MAX_NOMINATORS + i, SEED); - let nominator_controller: T::AccountId = account("nominator controller", n * MAX_NOMINATORS + i, SEED); - let nominator_controller_lookup: LookupSourceOf = T::Lookup::unlookup(nominator_controller.clone()); + for i in 0..nominators { + let nominator_stash: T::AccountId = + account("nominator stash", n * MAX_NOMINATORS + i, SEED); + let nominator_controller: T::AccountId = + account("nominator controller", n * MAX_NOMINATORS + i, SEED); + let nominator_controller_lookup: LookupSourceOf = + T::Lookup::unlookup(nominator_controller.clone()); T::Currency::make_free_balance_be(&nominator_stash, free_amount.into()); Staking::::bond( @@ -132,76 +137,82 @@ fn create_offender(n: u32, nominators: u32) -> Result, &' )?; let selected_validators: Vec> = vec![controller_lookup.clone()]; - Staking::::nominate(RawOrigin::Signed(nominator_controller.clone()).into(), selected_validators)?; + Staking::::nominate( + RawOrigin::Signed(nominator_controller.clone()).into(), + selected_validators, + )?; - individual_exposures.push(IndividualExposure { - who: nominator_stash.clone(), - value: amount.clone(), - }); + individual_exposures + .push(IndividualExposure { who: nominator_stash.clone(), value: amount.clone() }); nominator_stashes.push(nominator_stash.clone()); } - let exposure = Exposure { - total: amount.clone() * n.into(), - own: amount, - others: individual_exposures, - }; + let exposure = + Exposure { total: amount.clone() * n.into(), own: amount, others: individual_exposures }; let current_era = 0u32; Staking::::add_era_stakers(current_era.into(), stash.clone().into(), exposure); Ok(Offender { controller, stash, nominator_stashes }) } -fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result< - (Vec>, Vec>), - &'static str -> { +fn make_offenders( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { Staking::::new_session(0); let mut offenders = vec![]; - for i in 0 .. 
num_offenders { + for i in 0..num_offenders { let offender = create_offender::(i + 1, num_nominators)?; offenders.push(offender); } Staking::::start_session(0); - let id_tuples = offenders.iter() - .map(|offender| + let id_tuples = offenders + .iter() + .map(|offender| { ::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id")) - .map(|validator_id| + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { ::FullIdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification")) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) .collect::>>(); Ok((id_tuples, offenders)) } -fn make_offenders_im_online(num_offenders: u32, num_nominators: u32) -> Result< - (Vec>, Vec>), - &'static str -> { +fn make_offenders_im_online( + num_offenders: u32, + num_nominators: u32, +) -> Result<(Vec>, Vec>), &'static str> { Staking::::new_session(0); let mut offenders = vec![]; - for i in 0 .. num_offenders { + for i in 0..num_offenders { let offender = create_offender::(i + 1, num_nominators)?; offenders.push(offender); } Staking::::start_session(0); - let id_tuples = offenders.iter() - .map(|offender| < + let id_tuples = offenders + .iter() + .map(|offender| { + < ::ValidatorSet as ValidatorSet >::ValidatorIdOf::convert(offender.controller.clone()) - .expect("failed to get validator id from account id")) - .map(|validator_id| < + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + < ::ValidatorSet as ValidatorSetWithIdentification >::IdentificationOf::convert(validator_id.clone()) .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification")) + .expect("failed to convert validator id to full identification") + }) .collect::>>(); Ok((id_tuples, offenders)) } @@ -224,7 +235,9 @@ fn check_events::Event>>(expec pretty("--Got:", &events); pretty("--Expected:", &expected); format!("Mismatching length. Got: {}, expected: {}", lengths.0, lengths.1) - } else { Default::default() }; + } else { + Default::default() + }; for (idx, (a, b)) in events.into_iter().zip(expected).enumerate() { assert_eq!(a, b, "Mismatch at: {}. {}", idx, length_mismatch); @@ -388,8 +401,4 @@ benchmarks! 
{ } } -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Test, -); +impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test,); diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 4e7a63c58a40b8e64d659f8cee4b3fb84fdbe3df..6fc5ee8b66eb089124f904ed5d5268146660718a 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -20,17 +20,14 @@ #![cfg(test)] use super::*; -use frame_support::{ - parameter_types, - weights::constants::WEIGHT_PER_SECOND, -}; +use frame_election_provider_support::onchain; +use frame_support::{parameter_types, weights::constants::WEIGHT_PER_SECOND}; use frame_system as system; +use pallet_session::historical as pallet_session_historical; use sp_runtime::{ - traits::IdentityLookup, testing::{Header, UintAuthorityId}, + traits::IdentityLookup, }; -use frame_election_provider_support::onchain; -use pallet_session::historical as pallet_session_historical; type AccountId = u64; type AccountIndex = u32; @@ -112,7 +109,8 @@ impl pallet_session::SessionHandler for TestSessionHandler { _: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)], - ) {} + ) { + } fn on_disabled(_: usize) {} } @@ -198,7 +196,10 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } -impl frame_system::offchain::SendTransactionTypes for Test where Call: From { +impl frame_system::offchain::SendTransactionTypes for Test +where + Call: From, +{ type Extrinsic = Extrinsic; type OverarchingCall = Call; } diff --git a/substrate/frame/offences/src/lib.rs b/substrate/frame/offences/src/lib.rs index 1076dd615496d0bf0adcdd28089bf5fbc88bbc2e..3392cd6e4a884f2e99db1fab108dc25860885e2b 100644 --- a/substrate/frame/offences/src/lib.rs +++ b/substrate/frame/offences/src/lib.rs @@ -22,18 +22,18 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] +mod migration; mod mock; mod tests; -mod migration; -use sp_std::prelude::*; +use codec::{Decode, Encode}; use frame_support::weights::Weight; use sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ offence::{Kind, Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, - SessionIndex + SessionIndex, }; -use codec::{Decode, Encode}; +use sp_std::prelude::*; pub use pallet::*; @@ -44,17 +44,25 @@ type OpaqueTimeSlot = Vec; type ReportIdOf = ::Hash; pub trait WeightInfo { - fn report_offence_im_online(r: u32, o: u32, n: u32, ) -> Weight; - fn report_offence_grandpa(r: u32, n: u32, ) -> Weight; - fn report_offence_babe(r: u32, n: u32, ) -> Weight; - fn on_initialize(d: u32, ) -> Weight; + fn report_offence_im_online(r: u32, o: u32, n: u32) -> Weight; + fn report_offence_grandpa(r: u32, n: u32) -> Weight; + fn report_offence_babe(r: u32, n: u32) -> Weight; + fn on_initialize(d: u32) -> Weight; } impl WeightInfo for () { - fn report_offence_im_online(_r: u32, _o: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn report_offence_grandpa(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn report_offence_babe(_r: u32, _n: u32, ) -> Weight { 1_000_000_000 } - fn on_initialize(_d: u32, ) -> Weight { 1_000_000_000 } + fn report_offence_im_online(_r: u32, _o: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn report_offence_grandpa(_r: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn report_offence_babe(_r: u32, _n: u32) -> Weight { + 1_000_000_000 + } + fn on_initialize(_d: u32) -> Weight { + 1_000_000_000 + } } #[frame_support::pallet] @@ -145,22 +153,20 @@ where // Go through all offenders in the offence report and find all offenders that were spotted // in unique reports. - let TriageOutcome { - concurrent_offenders, - } = match Self::triage_offence_report::(reporters, &time_slot, offenders) { - Some(triage) => triage, - // The report contained only duplicates, so there is no need to slash again. - None => return Err(OffenceError::DuplicateReport), - }; + let TriageOutcome { concurrent_offenders } = + match Self::triage_offence_report::(reporters, &time_slot, offenders) { + Some(triage) => triage, + // The report contained only duplicates, so there is no need to slash again. + None => return Err(OffenceError::DuplicateReport), + }; let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed let new_fraction = O::slash_fraction(offenders_count, validator_set_count); - let slash_perbill: Vec<_> = (0..concurrent_offenders.len()) - .map(|_| new_fraction.clone()) - .collect(); + let slash_perbill: Vec<_> = + (0..concurrent_offenders.len()).map(|_| new_fraction.clone()).collect(); T::OnOffenceHandler::on_offence( &concurrent_offenders, @@ -212,10 +218,7 @@ impl Pallet { any_new = true; >::insert( &report_id, - OffenceDetails { - offender, - reporters: reporters.clone(), - }, + OffenceDetails { offender, reporters: reporters.clone() }, ); storage.insert(time_slot, report_id); @@ -232,9 +235,7 @@ impl Pallet { storage.save(); - Some(TriageOutcome { - concurrent_offenders, - }) + Some(TriageOutcome { concurrent_offenders }) } else { None } @@ -270,20 +271,14 @@ impl> ReportIndexStorage { let concurrent_reports = >::get(&O::ID, &opaque_time_slot); - Self { - opaque_time_slot, - concurrent_reports, - same_kind_reports, - } + Self { opaque_time_slot, concurrent_reports, same_kind_reports } } /// Insert a new report to the index. 
fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { // Insert the report id into the list while maintaining the ordering by the time // slot. - let pos = self - .same_kind_reports - .partition_point(|&(ref when, _)| when <= time_slot); + let pos = self.same_kind_reports.partition_point(|&(ref when, _)| when <= time_slot); self.same_kind_reports.insert(pos, (time_slot.clone(), report_id)); // Update the list of concurrent reports. diff --git a/substrate/frame/offences/src/migration.rs b/substrate/frame/offences/src/migration.rs index ce8a125e7e1a1ce3e2bee3d570a0b096e1445a88..cb5c520392c97e3a856b17b12953550a99dc9dee 100644 --- a/substrate/frame/offences/src/migration.rs +++ b/substrate/frame/offences/src/migration.rs @@ -16,18 +16,13 @@ // limitations under the License. use super::{Config, OffenceDetails, Perbill, SessionIndex}; -use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; +use frame_support::{generate_storage_alias, traits::Get, weights::Weight}; use sp_staking::offence::OnOffenceHandler; use sp_std::vec::Vec; /// Type of data stored as a deferred offence type DeferredOffenceOf = ( - Vec< - OffenceDetails< - ::AccountId, - ::IdentificationTuple, - >, - >, + Vec::AccountId, ::IdentificationTuple>>, Vec, SessionIndex, ); diff --git a/substrate/frame/offences/src/mock.rs b/substrate/frame/offences/src/mock.rs index 5818ae71687b24ead8bb53b2969d843292783f31..84114f015089cb9d54156107995519231bde664e 100644 --- a/substrate/frame/offences/src/mock.rs +++ b/substrate/frame/offences/src/mock.rs @@ -19,22 +19,27 @@ #![cfg(test)] -use std::cell::RefCell; +use crate as offences; use crate::Config; use codec::Encode; -use sp_runtime::Perbill; -use sp_staking::{ - SessionIndex, - offence::{self, Kind, OffenceDetails}, -}; -use sp_runtime::testing::Header; -use sp_runtime::traits::{IdentityLookup, BlakeTwo256}; -use sp_core::H256; use frame_support::{ parameter_types, - weights::{Weight, constants::{WEIGHT_PER_SECOND, RocksDbWeight}}, + weights::{ + constants::{RocksDbWeight, WEIGHT_PER_SECOND}, + Weight, + }, }; -use crate as offences; +use sp_core::H256; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; +use sp_staking::{ + offence::{self, Kind, OffenceDetails}, + SessionIndex, +}; +use std::cell::RefCell; pub struct OnOffenceHandler; @@ -43,8 +48,8 @@ thread_local! 
{ pub static OFFENCE_WEIGHT: RefCell = RefCell::new(Default::default()); } -impl - offence::OnOffenceHandler for OnOffenceHandler +impl offence::OnOffenceHandler + for OnOffenceHandler { fn on_offence( _offenders: &[OffenceDetails], @@ -60,9 +65,7 @@ impl } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| { - f(&mut *fractions.borrow_mut()) - }) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut *fractions.borrow_mut())) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -163,10 +166,7 @@ impl offence::Offence for Offence { 1 } - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) } } diff --git a/substrate/frame/offences/src/tests.rs b/substrate/frame/offences/src/tests.rs index d2e0f2d63d550e4c0f3780e6206f31e1dbf7d384..18cfa9410a6c6bd1ace5ffaa5341799a322d65ef 100644 --- a/substrate/frame/offences/src/tests.rs +++ b/substrate/frame/offences/src/tests.rs @@ -21,11 +21,11 @@ use super::*; use crate::mock::{ - Offences, System, Offence, Event, KIND, new_test_ext, with_on_offence_fractions, - offence_reports, report_id, + new_test_ext, offence_reports, report_id, with_on_offence_fractions, Event, Offence, Offences, + System, KIND, }; -use sp_runtime::Perbill; use frame_system::{EventRecord, Phase}; +use sp_runtime::Perbill; #[test] fn should_report_an_authority_and_trigger_on_offence() { @@ -34,11 +34,7 @@ fn should_report_an_authority_and_trigger_on_offence() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; // when Offences::report_offence(vec![], offence).unwrap(); @@ -57,11 +53,7 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -79,7 +71,6 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { }); } - #[test] fn should_report_in_different_time_slot() { new_test_ext().execute_with(|| { @@ -87,11 +78,7 @@ fn should_report_in_different_time_slot() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let mut offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let mut offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -117,11 +104,7 @@ fn should_deposit_event() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; // when Offences::report_offence(vec![], offence).unwrap(); @@ -145,11 +128,7 @@ fn doesnt_deposit_event_for_dups() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), 
vec![]); - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; + let offence = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -181,33 +160,26 @@ fn reports_if_an_offence_is_dup() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence = |time_slot, offenders| TestOffence { - validator_set_count: 5, - time_slot, - offenders, - }; + let offence = + |time_slot, offenders| TestOffence { validator_set_count: 5, time_slot, offenders }; let mut test_offence = offence(time_slot, vec![0]); // the report for authority 0 at time slot 42 should not be a known // offence - assert!( - !>::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // we report an offence for authority 0 at time slot 42 Offences::report_offence(vec![], test_offence.clone()).unwrap(); // the same report should be a known offence now - assert!( - >::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // and reporting it again should yield a duplicate report error assert_eq!( @@ -219,28 +191,21 @@ fn reports_if_an_offence_is_dup() { test_offence.offenders.push(1); // it should not be a known offence anymore - assert!( - !>::is_known_offence( - &test_offence.offenders, - &test_offence.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence.offenders, + &test_offence.time_slot + )); // and reporting it again should work without any error - assert_eq!( - Offences::report_offence(vec![], test_offence.clone()), - Ok(()) - ); + assert_eq!(Offences::report_offence(vec![], test_offence.clone()), Ok(())); // creating a new offence for the same authorities on the next slot // should be considered a new offence and therefore not known let test_offence_next_slot = offence(time_slot + 1, vec![0, 1]); - assert!( - !>::is_known_offence( - &test_offence_next_slot.offenders, - &test_offence_next_slot.time_slot - ) - ); + assert!(!>::is_known_offence( + &test_offence_next_slot.offenders, + &test_offence_next_slot.time_slot + )); }); } @@ -253,16 +218,8 @@ fn should_properly_count_offences() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence1 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - let offence2 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![4], - }; + let offence1 = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; + let offence2 = Offence { validator_set_count: 5, time_slot, offenders: vec![4] }; Offences::report_offence(vec![], offence1).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -294,26 +251,12 @@ fn should_properly_sort_offences() { let time_slot = 42; assert_eq!(offence_reports(KIND, time_slot), vec![]); - let offence1 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - let offence2 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![4], - }; - let offence3 = Offence { - validator_set_count: 5, - time_slot: time_slot + 1, - offenders: vec![6, 7], - }; - let offence4 = Offence { - validator_set_count: 5, - time_slot: time_slot - 1, - offenders: 
vec![3], - }; + let offence1 = Offence { validator_set_count: 5, time_slot, offenders: vec![5] }; + let offence2 = Offence { validator_set_count: 5, time_slot, offenders: vec![4] }; + let offence3 = + Offence { validator_set_count: 5, time_slot: time_slot + 1, offenders: vec![6, 7] }; + let offence4 = + Offence { validator_set_count: 5, time_slot: time_slot - 1, offenders: vec![3] }; Offences::report_offence(vec![], offence1).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); @@ -327,10 +270,10 @@ fn should_properly_sort_offences() { Offences::report_offence(vec![], offence4).unwrap(); // then - let same_kind_reports = - Vec::<(u128, sp_core::H256)>::decode( - &mut &crate::ReportsByKindIndex::::get(KIND)[..], - ).unwrap(); + let same_kind_reports = Vec::<(u128, sp_core::H256)>::decode( + &mut &crate::ReportsByKindIndex::::get(KIND)[..], + ) + .unwrap(); assert_eq!( same_kind_reports, vec![ diff --git a/substrate/frame/proxy/src/benchmarking.rs b/substrate/frame/proxy/src/benchmarking.rs index 336a80dd4ac5f590e9d430c555e36c6ae3c76043..a06c22a3ed8fed0fd65b615cb6c1876683f3d265 100644 --- a/substrate/frame/proxy/src/benchmarking.rs +++ b/substrate/frame/proxy/src/benchmarking.rs @@ -20,10 +20,10 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use crate::Pallet as Proxy; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Pallet as Proxy; const SEED: u32 = 0; @@ -48,7 +48,7 @@ fn add_proxies(n: u32, maybe_who: Option) -> Result<(), fn add_announcements( n: u32, maybe_who: Option, - maybe_real: Option + maybe_real: Option, ) -> Result<(), &'static str> { let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED)); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); @@ -247,8 +247,4 @@ benchmarks! { } } -impl_benchmark_test_suite!( - Proxy, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/proxy/src/lib.rs b/substrate/frame/proxy/src/lib.rs index d4f430a7e8b0b1e114caa1f7d92d7136cbb89b2b..56932669ed8c0abbeeb5fcb6887810aee44b732d 100644 --- a/substrate/frame/proxy/src/lib.rs +++ b/substrate/frame/proxy/src/lib.rs @@ -29,39 +29,39 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -mod tests; mod benchmarking; +mod tests; pub mod weights; -use sp_std::{prelude::*, convert::TryInto}; -use codec::{Encode, Decode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; +use frame_support::{ + dispatch::{DispatchError, DispatchResultWithPostInfo, PostDispatchInfo}, + ensure, + traits::{Currency, Get, InstanceFilter, IsSubType, IsType, OriginTrait, ReservableCurrency}, + weights::GetDispatchInfo, + RuntimeDebug, +}; +use frame_system::{self as system}; use sp_io::hashing::blake2_256; use sp_runtime::{ + traits::{Dispatchable, Hash, Saturating, Zero}, DispatchResult, - traits::{Dispatchable, Zero, Hash, Saturating} }; -use frame_support::{ - RuntimeDebug, ensure, - dispatch::{DispatchResultWithPostInfo, PostDispatchInfo}, - traits::{ - Get, ReservableCurrency, Currency, InstanceFilter, OriginTrait, - IsType, IsSubType, - }, - weights::GetDispatchInfo, -}; -use frame_system::{self as system}; -use frame_support::dispatch::DispatchError; +use sp_std::{convert::TryInto, prelude::*}; pub use weights::WeightInfo; pub use pallet::*; type CallHashOf = <::CallHasher as Hash>::Output; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// The parameters under which a particular account has a proxy relationship with some other /// account. -#[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, MaxEncodedLen)] +#[derive( + Encode, Decode, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug, MaxEncodedLen, +)] pub struct ProxyDefinition { /// The account which may act on behalf of another. pub delegate: AccountId, @@ -85,9 +85,9 @@ pub struct Announcement { #[frame_support::pallet] pub mod pallet { + use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::{*, DispatchResult}; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -101,8 +101,11 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The overarching call type. - type Call: Parameter + Dispatchable - + GetDispatchInfo + From> + IsSubType> + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From> + + IsSubType> + IsType<::Call>; /// The currency mechanism. @@ -112,8 +115,13 @@ pub mod pallet { /// The instance filter determines whether a given call may be proxied under this type. /// /// IMPORTANT: `Default` must be provided and MUST BE the *most permissive* value. - type ProxyType: Parameter + Member + Ord + PartialOrd + InstanceFilter<::Call> - + Default + MaxEncodedLen; + type ProxyType: Parameter + + Member + + Ord + + PartialOrd + + InstanceFilter<::Call> + + Default + + MaxEncodedLen; /// The base amount of currency needed to reserve for creating a proxy. 
/// @@ -291,21 +299,17 @@ pub mod pallet { origin: OriginFor, proxy_type: T::ProxyType, delay: T::BlockNumber, - index: u16 + index: u16, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let anonymous = Self::anonymous_account(&who, &proxy_type, index, None); ensure!(!Proxies::::contains_key(&anonymous), Error::::Duplicate); - let proxy_def = ProxyDefinition { - delegate: who.clone(), - proxy_type: proxy_type.clone(), - delay, - }; - let bounded_proxies: BoundedVec<_, T::MaxProxies> = vec![proxy_def] - .try_into() - .map_err(|_| Error::::TooMany)?; + let proxy_def = + ProxyDefinition { delegate: who.clone(), proxy_type: proxy_type.clone(), delay }; + let bounded_proxies: BoundedVec<_, T::MaxProxies> = + vec![proxy_def].try_into().map_err(|_| Error::::TooMany)?; let deposit = T::ProxyDepositBase::get() + T::ProxyDepositFactor::get(); T::Currency::reserve(&who, deposit)?; @@ -382,10 +386,12 @@ pub mod pallet { pub fn announce( origin: OriginFor, real: T::AccountId, - call_hash: CallHashOf - ) -> DispatchResultWithPostInfo{ + call_hash: CallHashOf, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Proxies::::get(&real).0.into_iter() + Proxies::::get(&real) + .0 + .into_iter() .find(|x| &x.delegate == &who) .ok_or(Error::::NotProxy)?; @@ -403,7 +409,10 @@ pub mod pallet { T::AnnouncementDepositBase::get(), T::AnnouncementDepositFactor::get(), pending.len(), - ).map(|d| d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed")) + ) + .map(|d| { + d.expect("Just pushed; pending.len() > 0; rejig_deposit returns Some; qed") + }) .map(|d| *deposit = d) })?; Self::deposit_event(Event::Announced(real, who, call_hash)); @@ -433,7 +442,7 @@ pub mod pallet { pub fn remove_announcement( origin: OriginFor, real: T::AccountId, - call_hash: CallHashOf + call_hash: CallHashOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; Self::edit_announcements(&who, |ann| ann.real != real || ann.call_hash != call_hash)?; @@ -463,10 +472,12 @@ pub mod pallet { pub fn reject_announcement( origin: OriginFor, delegate: T::AccountId, - call_hash: CallHashOf + call_hash: CallHashOf, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; - Self::edit_announcements(&delegate, |ann| ann.real != who || ann.call_hash != call_hash)?; + Self::edit_announcements(&delegate, |ann| { + ann.real != who || ann.call_hash != call_hash + })?; Ok(().into()) } @@ -508,9 +519,12 @@ pub mod pallet { let call_hash = T::CallHasher::hash_of(&call); let now = system::Pallet::::block_number(); - Self::edit_announcements(&delegate, |ann| - ann.real != real || ann.call_hash != call_hash || now.saturating_sub(ann.height) < def.delay - ).map_err(|_| Error::::Unannounced)?; + Self::edit_announcements(&delegate, |ann| { + ann.real != real || + ann.call_hash != call_hash || + now.saturating_sub(ann.height) < def.delay + }) + .map_err(|_| Error::::Unannounced)?; Self::do_proxy(def, real, *call); @@ -521,8 +535,7 @@ pub mod pallet { #[pallet::event] #[pallet::metadata(T::AccountId = "AccountId", T::ProxyType = "ProxyType", CallHashOf = "Hash")] #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event - { + pub enum Event { /// A proxy was executed correctly, with the given \[result\]. ProxyExecuted(DispatchResult), /// Anonymous account has been created by new proxy with given @@ -533,10 +546,10 @@ pub mod pallet { } /// Old name generated by `decl_event`. 
- #[deprecated(note="use `Event` instead")] + #[deprecated(note = "use `Event` instead")] pub type RawEvent = Event; - #[pallet::error] + #[pallet::error] pub enum Error { /// There are too many proxies registered or too many announcements pending. TooMany, @@ -565,13 +578,10 @@ pub mod pallet { Twox64Concat, T::AccountId, ( - BoundedVec< - ProxyDefinition, - T::MaxProxies, - >, - BalanceOf + BoundedVec, T::MaxProxies>, + BalanceOf, ), - ValueQuery + ValueQuery, >; /// The announcements made by the proxy (key). @@ -582,19 +592,14 @@ pub mod pallet { Twox64Concat, T::AccountId, ( - BoundedVec< - Announcement, T::BlockNumber>, - T::MaxPending, - >, + BoundedVec, T::BlockNumber>, T::MaxPending>, BalanceOf, ), - ValueQuery + ValueQuery, >; - } impl Pallet { - /// Calculate the address of an anonymous account. /// /// - `who`: The spawner account. @@ -612,10 +617,12 @@ impl Pallet { index: u16, maybe_when: Option<(T::BlockNumber, u32)>, ) -> T::AccountId { - let (height, ext_index) = maybe_when.unwrap_or_else(|| ( - system::Pallet::::block_number(), - system::Pallet::::extrinsic_index().unwrap_or_default() - )); + let (height, ext_index) = maybe_when.unwrap_or_else(|| { + ( + system::Pallet::::block_number(), + system::Pallet::::extrinsic_index().unwrap_or_default(), + ) + }); let entropy = (b"modlpy/proxy____", who, height, ext_index, proxy_type, index) .using_encoded(blake2_256); T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() @@ -698,26 +705,22 @@ impl Pallet { factor: BalanceOf, len: usize, ) -> Result>, DispatchError> { - let new_deposit = if len == 0 { - BalanceOf::::zero() - } else { - base + factor * (len as u32).into() - }; + let new_deposit = + if len == 0 { BalanceOf::::zero() } else { base + factor * (len as u32).into() }; if new_deposit > old_deposit { T::Currency::reserve(&who, new_deposit - old_deposit)?; } else if new_deposit < old_deposit { T::Currency::unreserve(&who, old_deposit - new_deposit); } - Ok(if len == 0 { - None - } else { - Some(new_deposit) - }) + Ok(if len == 0 { None } else { Some(new_deposit) }) } fn edit_announcements< - F: FnMut(&Announcement, T::BlockNumber>) -> bool - >(delegate: &T::AccountId, f: F) -> DispatchResult { + F: FnMut(&Announcement, T::BlockNumber>) -> bool, + >( + delegate: &T::AccountId, + f: F, + ) -> DispatchResult { Announcements::::try_mutate_exists(delegate, |x| { let (mut pending, old_deposit) = x.take().ok_or(Error::::NotFound)?; let orig_pending_len = pending.len(); @@ -729,7 +732,8 @@ impl Pallet { T::AnnouncementDepositBase::get(), T::AnnouncementDepositFactor::get(), pending.len(), - )?.map(|deposit| (pending, deposit)); + )? + .map(|deposit| (pending, deposit)); Ok(()) }) } @@ -740,7 +744,8 @@ impl Pallet { force_proxy_type: Option, ) -> Result, DispatchError> { let f = |x: &ProxyDefinition| -> bool { - &x.delegate == delegate && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + &x.delegate == delegate && + force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) }; Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) } @@ -758,11 +763,13 @@ impl Pallet { match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already has. Some(Call::add_proxy(_, ref pt, _)) | Some(Call::remove_proxy(_, ref pt, _)) - if !def.proxy_type.is_superset(&pt) => false, + if !def.proxy_type.is_superset(&pt) => + false, // Proxy call cannot remove all proxies or kill anonymous proxies unless it has full permissions. 
Some(Call::remove_proxies(..)) | Some(Call::kill_anonymous(..)) - if def.proxy_type != T::ProxyType::default() => false, - _ => def.proxy_type.filter(c) + if def.proxy_type != T::ProxyType::default() => + false, + _ => def.proxy_type.filter(c), } }); let e = call.dispatch(origin); diff --git a/substrate/frame/proxy/src/tests.rs b/substrate/frame/proxy/src/tests.rs index 4383fbea007139db46793ac3665e89565b305ae1..536a226c7b46df776883942c3a969f94ecb9cd11 100644 --- a/substrate/frame/proxy/src/tests.rs +++ b/substrate/frame/proxy/src/tests.rs @@ -21,13 +21,16 @@ use super::*; +use crate as proxy; +use codec::{Decode, Encode}; use frame_support::{ - assert_ok, assert_noop, parameter_types, RuntimeDebug, dispatch::DispatchError, traits::Filter, + assert_noop, assert_ok, dispatch::DispatchError, parameter_types, traits::Filter, RuntimeDebug, }; -use codec::{Encode, Decode}; use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as proxy; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -102,18 +105,25 @@ parameter_types! { pub const AnnouncementDepositBase: u64 = 1; pub const AnnouncementDepositFactor: u64 = 1; } -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen)] +#[derive( + Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, RuntimeDebug, MaxEncodedLen, +)] pub enum ProxyType { Any, JustTransfer, JustUtility, } -impl Default for ProxyType { fn default() -> Self { Self::Any } } +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, - ProxyType::JustTransfer => matches!(c, Call::Balances(pallet_balances::Call::transfer(..))), + ProxyType::JustTransfer => + matches!(c, Call::Balances(pallet_balances::Call::transfer(..))), ProxyType::JustUtility => matches!(c, Call::Utility(..)), } } @@ -147,27 +157,31 @@ impl Config for Test { type AnnouncementDepositFactor = AnnouncementDepositFactor; } +use super::{Call as ProxyCall, Event as ProxyEvent}; use frame_system::Call as SystemCall; -use pallet_balances::Call as BalancesCall; -use pallet_balances::Error as BalancesError; -use pallet_balances::Event as BalancesEvent; -use pallet_utility::Call as UtilityCall; -use pallet_utility::Event as UtilityEvent; -use super::Event as ProxyEvent; -use super::Call as ProxyCall; +use pallet_balances::{Call as BalancesCall, Error as BalancesError, Event as BalancesEvent}; +use pallet_utility::{Call as UtilityCall, Event as UtilityEvent}; pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 2)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } fn last_events(n: usize) -> Vec { - system::Pallet::::events().into_iter().rev().take(n).rev().map(|e| e.event).collect() + system::Pallet::::events() + .into_iter() + .rev() + .take(n) + .rev() + .map(|e| e.event) + .collect() } fn expect_events(e: Vec) { @@ -183,27 +197,21 @@ fn announcement_works() { assert_ok!(Proxy::announce(Origin::signed(3), 1, [1; 32].into())); let announcements = 
Announcements::::get(3); - assert_eq!(announcements.0, vec![Announcement { - real: 1, - call_hash: [1; 32].into(), - height: 1, - }]); + assert_eq!( + announcements.0, + vec![Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }] + ); assert_eq!(Balances::reserved_balance(3), announcements.1); assert_ok!(Proxy::announce(Origin::signed(3), 2, [2; 32].into())); let announcements = Announcements::::get(3); - assert_eq!(announcements.0, vec![ - Announcement { - real: 1, - call_hash: [1; 32].into(), - height: 1, - }, - Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }, - ]); + assert_eq!( + announcements.0, + vec![ + Announcement { real: 1, call_hash: [1; 32].into(), height: 1 }, + Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }, + ] + ); assert_eq!(Balances::reserved_balance(3), announcements.1); assert_noop!(Proxy::announce(Origin::signed(3), 2, [3; 32].into()), Error::::TooMany); @@ -221,11 +229,10 @@ fn remove_announcement_works() { assert_noop!(Proxy::remove_announcement(Origin::signed(3), 1, [0; 32].into()), e); assert_ok!(Proxy::remove_announcement(Origin::signed(3), 1, [1; 32].into())); let announcements = Announcements::::get(3); - assert_eq!(announcements.0, vec![Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }]); + assert_eq!( + announcements.0, + vec![Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }] + ); assert_eq!(Balances::reserved_balance(3), announcements.1); }); } @@ -243,11 +250,10 @@ fn reject_announcement_works() { assert_noop!(Proxy::reject_announcement(Origin::signed(4), 3, [1; 32].into()), e); assert_ok!(Proxy::reject_announcement(Origin::signed(1), 3, [1; 32].into())); let announcements = Announcements::::get(3); - assert_eq!(announcements.0, vec![Announcement { - real: 2, - call_hash: [2; 32].into(), - height: 1, - }]); + assert_eq!( + announcements.0, + vec![Announcement { real: 2, call_hash: [2; 32].into(), height: 1 }] + ); assert_eq!(Balances::reserved_balance(3), announcements.1); }); } @@ -291,11 +297,7 @@ fn proxy_announced_removes_announcement_and_returns_deposit() { system::Pallet::::set_block_number(2); assert_ok!(Proxy::proxy_announced(Origin::signed(0), 3, 1, None, call.clone())); let announcements = Announcements::::get(3); - assert_eq!(announcements.0, vec![Announcement { - real: 2, - call_hash, - height: 1, - }]); + assert_eq!(announcements.0, vec![Announcement { real: 2, call_hash, height: 1 }]); assert_eq!(Balances::reserved_balance(3), announcements.1); }); } @@ -330,7 +332,10 @@ fn filtering_works() { let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); @@ -342,7 +347,10 @@ fn filtering_works() { let inner = Box::new(Call::Proxy(ProxyCall::add_proxy(5, ProxyType::Any, 0))); let call = Box::new(Call::Utility(UtilityCall::batch(vec![*inner]))); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![UtilityEvent::BatchCompleted.into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + 
UtilityEvent::BatchCompleted.into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); @@ -357,7 +365,10 @@ fn filtering_works() { assert_ok!(Proxy::proxy(Origin::signed(4), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(2), 1, None, call.clone())); - expect_events(vec![BalancesEvent::::Unreserved(1, 5).into(), ProxyEvent::ProxyExecuted(Ok(())).into()]); + expect_events(vec![ + BalancesEvent::::Unreserved(1, 5).into(), + ProxyEvent::ProxyExecuted(Ok(())).into(), + ]); }); } @@ -365,7 +376,10 @@ fn filtering_works() { fn add_remove_proxies_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0)); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), Error::::Duplicate); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 2, ProxyType::Any, 0), + Error::::Duplicate + ); assert_eq!(Balances::reserved_balance(1), 2); assert_ok!(Proxy::add_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 3); @@ -373,8 +387,14 @@ fn add_remove_proxies_works() { assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 5); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), Error::::TooMany); - assert_noop!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), Error::::NotFound); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 4, ProxyType::Any, 0), + Error::::TooMany + ); + assert_noop!( + Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::JustTransfer, 0), + Error::::NotFound + ); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 4, ProxyType::JustUtility, 0)); assert_eq!(Balances::reserved_balance(1), 4); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); @@ -383,7 +403,10 @@ fn add_remove_proxies_works() { assert_eq!(Balances::reserved_balance(1), 2); assert_ok!(Proxy::remove_proxy(Origin::signed(1), 2, ProxyType::JustTransfer, 0)); assert_eq!(Balances::reserved_balance(1), 0); - assert_noop!(Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), Error::::NoSelfProxy); + assert_noop!( + Proxy::add_proxy(Origin::signed(1), 1, ProxyType::Any, 0), + Error::::NoSelfProxy + ); }); } @@ -406,7 +429,10 @@ fn proxying_works() { assert_ok!(Proxy::add_proxy(Origin::signed(1), 3, ProxyType::Any, 0)); let call = Box::new(Call::Balances(BalancesCall::transfer(6, 1))); - assert_noop!(Proxy::proxy(Origin::signed(4), 1, None, call.clone()), Error::::NotProxy); + assert_noop!( + Proxy::proxy(Origin::signed(4), 1, None, call.clone()), + Error::::NotProxy + ); assert_noop!( Proxy::proxy(Origin::signed(2), 1, Some(ProxyType::Any), call.clone()), Error::::NotProxy @@ -420,7 +446,9 @@ fn proxying_works() { System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); let call = Box::new(Call::Balances(BalancesCall::transfer_keep_alive(6, 1))); - assert_ok!(Call::Proxy(super::Call::proxy(1, None, call.clone())).dispatch(Origin::signed(2))); + assert_ok!( + Call::Proxy(super::Call::proxy(1, None, call.clone())).dispatch(Origin::signed(2)) + ); 
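// Two layers are asserted around here: the outer `proxy` dispatch itself
// succeeds (`assert_ok!`), while the rejected inner call only surfaces as
// `ProxyExecuted(Err(BadOrigin))` through the event checked next.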
System::assert_last_event(ProxyEvent::ProxyExecuted(Err(DispatchError::BadOrigin)).into()); assert_ok!(Proxy::proxy(Origin::signed(3), 1, None, call.clone())); System::assert_last_event(ProxyEvent::ProxyExecuted(Ok(())).into()); @@ -433,14 +461,19 @@ fn anonymous_works() { new_test_ext().execute_with(|| { assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); let anon = Proxy::anonymous_account(&1, &ProxyType::Any, 0, None); - System::assert_last_event(ProxyEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0).into()); + System::assert_last_event( + ProxyEvent::AnonymousCreated(anon.clone(), 1, ProxyType::Any, 0).into(), + ); // other calls to anonymous allowed as long as they're not exactly the same. assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::JustTransfer, 0, 0)); assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 1)); let anon2 = Proxy::anonymous_account(&2, &ProxyType::Any, 0, None); assert_ok!(Proxy::anonymous(Origin::signed(2), ProxyType::Any, 0, 0)); - assert_noop!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), Error::::Duplicate); + assert_noop!( + Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0), + Error::::Duplicate + ); System::set_extrinsic_index(1); assert_ok!(Proxy::anonymous(Origin::signed(1), ProxyType::Any, 0, 0)); System::set_extrinsic_index(0); @@ -464,6 +497,9 @@ fn anonymous_works() { assert_eq!(Balances::free_balance(1), 0); assert_ok!(Proxy::proxy(Origin::signed(1), anon, None, call.clone())); assert_eq!(Balances::free_balance(1), 2); - assert_noop!(Proxy::proxy(Origin::signed(1), anon, None, call.clone()), Error::::NotProxy); + assert_noop!( + Proxy::proxy(Origin::signed(1), anon, None, call.clone()), + Error::::NotProxy + ); }); } diff --git a/substrate/frame/proxy/src/weights.rs b/substrate/frame/proxy/src/weights.rs index f250186ad81d7684037e65b4c66ea9a0ac13ec7c..872c7b79fb6086639eeb99e1fc1fce2694271104 100644 --- a/substrate/frame/proxy/src/weights.rs +++ b/substrate/frame/proxy/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/randomness-collective-flip/src/lib.rs b/substrate/frame/randomness-collective-flip/src/lib.rs index 1ff7d4382da14e76c1290370b95c41712dbd59b1..64a263dd5bbd06f481a6d7d672b5c772abe11bfc 100644 --- a/substrate/frame/randomness-collective-flip/src/lib.rs +++ b/substrate/frame/randomness-collective-flip/src/lib.rs @@ -69,9 +69,9 @@ use safe_mix::TripletMix; use codec::Encode; -use sp_std::{prelude::*, convert::TryInto}; -use sp_runtime::traits::{Hash, Saturating}; use frame_support::traits::Randomness; +use sp_runtime::traits::{Hash, Saturating}; +use sp_std::{convert::TryInto, prelude::*}; const RANDOM_MATERIAL_LEN: u32 = 81; @@ -85,9 +85,9 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -101,11 +101,13 @@ pub mod pallet { fn on_initialize(block_number: T::BlockNumber) -> Weight { let parent_hash = >::parent_hash(); - >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { - values.push(parent_hash) - } else { - let index = block_number_to_index::(block_number); - values[index] = parent_hash; + >::mutate(|ref mut values| { + if values.len() < RANDOM_MATERIAL_LEN as usize { + values.push(parent_hash) + } else { + 
let index = block_number_to_index::(block_number); + values[index] = parent_hash; + } }); T::DbWeight::get().reads_writes(1, 1) @@ -117,8 +119,7 @@ pub mod pallet { /// the oldest hash. #[pallet::storage] #[pallet::getter(fn random_material)] - pub(super) type RandomMaterial = - StorageValue<_, Vec, ValueQuery>; + pub(super) type RandomMaterial = StorageValue<_, Vec, ValueQuery>; } impl Randomness for Pallet { @@ -151,17 +152,14 @@ impl Randomness for Pallet { T::Hash::default() }; - ( - seed, - block_number.saturating_sub(RANDOM_MATERIAL_LEN.into()), - ) + (seed, block_number.saturating_sub(RANDOM_MATERIAL_LEN.into())) } } #[cfg(test)] mod tests { - use crate as pallet_randomness_collective_flip; use super::*; + use crate as pallet_randomness_collective_flip; use sp_core::H256; use sp_runtime::{ @@ -169,7 +167,10 @@ mod tests { traits::{BlakeTwo256, Header as _, IdentityLookup}, }; - use frame_support::{parameter_types, traits::{Randomness, OnInitialize}}; + use frame_support::{ + parameter_types, + traits::{OnInitialize, Randomness}, + }; use frame_system::limits; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -229,7 +230,7 @@ mod tests { #[test] fn test_block_number_to_index() { - for i in 1 .. 1000 { + for i in 1..1000 { assert_eq!((i - 1) as usize % 81, block_number_to_index::(i)); } } @@ -237,13 +238,8 @@ mod tests { fn setup_blocks(blocks: u64) { let mut parent_hash = System::parent_hash(); - for i in 1 .. (blocks + 1) { - System::initialize( - &i, - &parent_hash, - &Default::default(), - frame_system::InitKind::Full, - ); + for i in 1..(blocks + 1) { + System::initialize(&i, &parent_hash, &Default::default(), frame_system::InitKind::Full); CollectiveFlip::on_initialize(i); let header = System::finalize(); diff --git a/substrate/frame/recovery/src/lib.rs b/substrate/frame/recovery/src/lib.rs index 6f5c7ebcb6e4aeea95241c2c8bae102024b2dbaf..0214a38b0e8e1f04021e6372caa0739495bb544c 100644 --- a/substrate/frame/recovery/src/lib.rs +++ b/substrate/frame/recovery/src/lib.rs @@ -151,14 +151,15 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] +use codec::{Decode, Encode}; +use sp_runtime::traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}; use sp_std::prelude::*; -use sp_runtime::traits::{Dispatchable, SaturatedConversion, CheckedAdd, CheckedMul}; -use codec::{Encode, Decode}; use frame_support::{ - RuntimeDebug, weights::GetDispatchInfo, - traits::{Currency, ReservableCurrency, BalanceStatus}, dispatch::PostDispatchInfo, + traits::{BalanceStatus, Currency, ReservableCurrency}, + weights::GetDispatchInfo, + RuntimeDebug, }; pub use pallet::*; @@ -200,10 +201,10 @@ pub struct RecoveryConfig { #[frame_support::pallet] pub mod pallet { - use frame_support::{ensure, Parameter, pallet_prelude::*, traits::Get}; - use frame_system::{pallet_prelude::*, ensure_signed, ensure_root}; - use sp_runtime::ArithmeticError; use super::*; + use frame_support::{ensure, pallet_prelude::*, traits::Get, Parameter}; + use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; + use sp_runtime::ArithmeticError; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -216,7 +217,9 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The overarching call type. - type Call: Parameter + Dispatchable + GetDispatchInfo; + type Call: Parameter + + Dispatchable + + GetDispatchInfo; /// The currency mechanism. 
type Currency: ReservableCurrency; @@ -313,7 +316,8 @@ pub mod pallet { #[pallet::getter(fn recovery_config)] pub type Recoverable = StorageMap< _, - Twox64Concat, T::AccountId, + Twox64Concat, + T::AccountId, RecoveryConfig, T::AccountId>, >; @@ -323,10 +327,12 @@ pub mod pallet { /// is the user trying to recover the account. #[pallet::storage] #[pallet::getter(fn active_recovery)] - pub type ActiveRecoveries= StorageDoubleMap< + pub type ActiveRecoveries = StorageDoubleMap< _, - Twox64Concat, T::AccountId, - Twox64Concat, T::AccountId, + Twox64Concat, + T::AccountId, + Twox64Concat, + T::AccountId, ActiveRecovery, T::AccountId>, >; @@ -365,14 +371,15 @@ pub mod pallet { pub fn as_recovered( origin: OriginFor, account: T::AccountId, - call: Box<::Call> + call: Box<::Call>, ) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(&target == &account, Error::::NotAllowed); call.dispatch(frame_system::RawOrigin::Signed(account).into()) - .map(|_| ()).map_err(|e| e.error) + .map(|_| ()) + .map_err(|e| e.error) } /// Allow ROOT to bypass the recovery process and set a rescuer account @@ -433,7 +440,7 @@ pub mod pallet { origin: OriginFor, friends: Vec, threshold: u16, - delay_period: T::BlockNumber + delay_period: T::BlockNumber, ) -> DispatchResult { let who = ensure_signed(origin)?; // Check account is not already set up for recovery @@ -455,12 +462,8 @@ pub mod pallet { // Reserve the deposit T::Currency::reserve(&who, total_deposit)?; // Create the recovery configuration - let recovery_config = RecoveryConfig { - delay_period, - deposit: total_deposit, - friends, - threshold, - }; + let recovery_config = + RecoveryConfig { delay_period, deposit: total_deposit, friends, threshold }; // Create the recovery configuration storage item >::insert(&who, recovery_config); @@ -496,7 +499,10 @@ pub mod pallet { // Check that the account is recoverable ensure!(>::contains_key(&account), Error::::NotRecoverable); // Check that the recovery process has not already been started - ensure!(!>::contains_key(&account, &who), Error::::AlreadyStarted); + ensure!( + !>::contains_key(&account, &who), + Error::::AlreadyStarted + ); // Take recovery deposit let recovery_deposit = T::RecoveryDeposit::get(); T::Currency::reserve(&who, recovery_deposit)?; @@ -541,13 +547,14 @@ pub mod pallet { pub fn vouch_recovery( origin: OriginFor, lost: T::AccountId, - rescuer: T::AccountId + rescuer: T::AccountId, ) -> DispatchResult { let who = ensure_signed(origin)?; // Get the recovery configuration for the lost account. let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer. - let mut active_recovery = Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; + let mut active_recovery = + Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; // Make sure the voter is a friend ensure!(Self::is_friend(&recovery_config.friends, &who), Error::::NotFriend); // Either insert the vouch, or return an error that the user already vouched. 
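// Illustrative sketch (hypothetical helper, not taken from this file): the
// deposit reserved by `create_recovery` above scales as base + factor * friends,
// using the `CheckedAdd`/`CheckedMul` imports; with a base of 10 and factor of 1,
// as in the mock runtime, three friends reserve 13, matching the figure the
// tests assert via `Balances::reserved_balance(5)` further down.
fn recovery_deposit(base: u64, factor: u64, friend_count: u64) -> Option<u64> {
    // `None` mirrors the pallet's overflow handling for the checked arithmetic.
    factor.checked_mul(friend_count)?.checked_add(base)
}
// e.g. recovery_deposit(10, 1, 3) == Some(13)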
@@ -585,13 +592,16 @@ pub mod pallet { pub fn claim_recovery(origin: OriginFor, account: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Get the recovery configuration for the lost account - let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; + let recovery_config = + Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer - let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; + let active_recovery = + Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); // Make sure the delay period has passed let current_block_number = >::block_number(); - let recoverable_block_number = active_recovery.created + let recoverable_block_number = active_recovery + .created .checked_add(&recovery_config.delay_period) .ok_or(ArithmeticError::Overflow)?; ensure!(recoverable_block_number <= current_block_number, Error::::DelayPeriod); @@ -631,10 +641,16 @@ pub mod pallet { pub fn close_recovery(origin: OriginFor, rescuer: T::AccountId) -> DispatchResult { let who = ensure_signed(origin)?; // Take the active recovery process started by the rescuer for this account. - let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; + let active_recovery = + >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; // Move the reserved funds from the rescuer to the rescued account. // Acts like a slashing mechanism for those who try to maliciously recover accounts. - let res = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved( + &rescuer, + &who, + active_recovery.deposit, + BalanceStatus::Free, + ); debug_assert!(res.is_ok()); Self::deposit_event(Event::::RecoveryClosed(who, rescuer)); Ok(()) diff --git a/substrate/frame/recovery/src/mock.rs b/substrate/frame/recovery/src/mock.rs index 9139cc12ce54a73f977e0004211d8b20d42b9462..c9c01e35bf9bbcbeac92f4b2ae89114415de1328 100644 --- a/substrate/frame/recovery/src/mock.rs +++ b/substrate/frame/recovery/src/mock.rs @@ -19,12 +19,16 @@ use super::*; -use frame_support::{parameter_types, traits::{OnInitialize, OnFinalize}}; +use crate as recovery; +use frame_support::{ + parameter_types, + traits::{OnFinalize, OnInitialize}, +}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, }; -use crate as recovery; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -113,7 +117,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); pallet_balances::GenesisConfig:: { balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/substrate/frame/recovery/src/tests.rs b/substrate/frame/recovery/src/tests.rs index 4c7c6ef108d72fd9c051e1935b6964da256adaff..9065e9afe8861057b57fc1fcc6ac1742d6f9bab0 100644 --- a/substrate/frame/recovery/src/tests.rs +++ b/substrate/frame/recovery/src/tests.rs @@ -18,15 +18,11 @@ //! Tests for the module. 
use super::*; +use frame_support::{assert_noop, assert_ok, traits::Currency}; use mock::{ - Recovery, Balances, Test, Origin, Call, BalancesCall, RecoveryCall, - new_test_ext, run_to_block -}; -use sp_runtime::traits::{BadOrigin}; -use frame_support::{ - assert_noop, assert_ok, - traits::{Currency}, + new_test_ext, run_to_block, Balances, BalancesCall, Call, Origin, Recovery, RecoveryCall, Test, }; +use sp_runtime::traits::BadOrigin; #[test] fn basic_setup_works() { @@ -118,7 +114,7 @@ fn malicious_recovery_fails() { assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // shame on you assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // shame on you assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // shame on you - // We met the threshold, lets try to recover the account...? + // We met the threshold, let's try to recover the account...? assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::<Test>::DelayPeriod); // Account 1 needs to wait... run_to_block(19); @@ -136,7 +132,12 @@ assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::<Test>::NotStarted); // Account 5 can remove their recovery config and pick some better friends assert_ok!(Recovery::remove_recovery(Origin::signed(5))); - assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![22, 33, 44], threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + vec![22, 33, 44], + threshold, + delay_period + )); }); } @@ -174,9 +175,7 @@ fn create_recovery_handles_basic_errors() { Error::<Test>::NotSorted ); // Already configured - assert_ok!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10) - ); + assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10)); assert_noop!( Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), Error::<Test>::AlreadyRecoverable ); @@ -191,17 +190,18 @@ fn create_recovery_works() { new_test_ext().execute_with(|| { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Deposit is taken, and scales with the number of friends they pick // Base 10 + 1 per friend = 13 total reserved assert_eq!(Balances::reserved_balance(5), 13); // Recovery configuration is correctly stored - let recovery_config = RecoveryConfig { - delay_period, - deposit: 13, - friends: friends.clone(), - threshold, - }; + let recovery_config = + RecoveryConfig { delay_period, deposit: 13, friends: friends.clone(), threshold }; assert_eq!(Recovery::recovery_config(5), Some(recovery_config)); }); } @@ -218,10 +218,18 @@ fn initiate_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Same user cannot recover same account twice assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_noop!(Recovery::initiate_recovery(Origin::signed(1), 5), Error::<Test>::AlreadyStarted); + assert_noop!( + Recovery::initiate_recovery(Origin::signed(1), 5), + Error::<Test>::AlreadyStarted + ); // No double deposit assert_eq!(Balances::reserved_balance(1), 10); }); } @@ -234,17 +242,18 @@ fn
initiate_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Recovery can be initiated assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Deposit is reserved assert_eq!(Balances::reserved_balance(1), 10); // Recovery status object is created correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![], - }; + let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: vec![] }; assert_eq!(>::get(&5, &1), Some(recovery_status)); // Multiple users can attempt to recover the same account assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); @@ -255,12 +264,20 @@ fn initiate_recovery_works() { fn vouch_recovery_handles_basic_errors() { new_test_ext().execute_with(|| { // Cannot vouch for non-recoverable account - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotRecoverable); + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::NotRecoverable + ); // Create a recovery process for next tests let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Cannot vouch a recovery process that has not started assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotStarted); // Initiate a recovery process @@ -269,7 +286,10 @@ fn vouch_recovery_handles_basic_errors() { assert_noop!(Recovery::vouch_recovery(Origin::signed(22), 5, 1), Error::::NotFriend); // Cannot vouch twice assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::AlreadyVouched); + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::AlreadyVouched + ); }); } @@ -280,7 +300,12 @@ fn vouch_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); // Vouching works assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); @@ -288,11 +313,7 @@ fn vouch_recovery_works() { assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // Final recovery status object is updated correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![2, 3, 4], - }; + let recovery_status = ActiveRecovery { created: 0, deposit: 10, friends: vec![2, 3, 4] }; assert_eq!(>::get(&5, &1), Some(recovery_status)); }); } @@ -306,7 +327,12 @@ fn claim_recovery_handles_basic_errors() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); // Cannot claim an account which has not started the recovery 
process assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); @@ -328,7 +354,12 @@ fn claim_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); @@ -372,7 +403,12 @@ fn remove_recovery_works() { let friends = vec![2, 3, 4]; let threshold = 3; let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); // Cannot remove a recovery when there are active recoveries. diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index 47375658fb9bca71e1daf85844d168173030dcb4..f6909160c5ee00d51d8ce91b55c00f037435b73d 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -20,10 +20,10 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::{vec, prelude::*}; -use frame_system::RawOrigin; -use frame_support::{ensure, traits::OnInitialize}; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_support::{ensure, traits::OnInitialize}; +use frame_system::RawOrigin; +use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; use frame_system::Pallet as System; @@ -31,7 +31,7 @@ use frame_system::Pallet as System; const BLOCK_NUMBER: u32 = 2; // Add `n` named items to the schedule -fn fill_schedule (when: T::BlockNumber, n: u32) -> Result<(), &'static str> { +fn fill_schedule(when: T::BlockNumber, n: u32) -> Result<(), &'static str> { // Essentially a no-op call. let call = frame_system::Call::set_storage(vec![]); for i in 0..n { @@ -141,8 +141,4 @@ benchmarks! 
{ } } -impl_benchmark_test_suite!( - Scheduler, - crate::tests::new_test_ext(), - crate::tests::Test, -); +impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test,); diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 4fdf1891be99ddf5505f602a4b82815e4ece525f..6cbf172d26d8ef20f14368b1cbbefa97d7963597 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ b/substrate/frame/scheduler/src/lib.rs @@ -54,17 +54,23 @@ mod benchmarking; pub mod weights; -use sp_std::{prelude::*, marker::PhantomData, borrow::Borrow}; -use codec::{Encode, Decode, Codec}; -use sp_runtime::{RuntimeDebug, traits::{Zero, One, BadOrigin, Saturating}}; +use codec::{Codec, Decode, Encode}; use frame_support::{ - dispatch::{Dispatchable, DispatchError, DispatchResult, Parameter}, - traits::{Get, schedule::{self, DispatchTime}, OriginTrait, EnsureOrigin, IsType}, + dispatch::{DispatchError, DispatchResult, Dispatchable, Parameter}, + traits::{ + schedule::{self, DispatchTime}, + EnsureOrigin, Get, IsType, OriginTrait, + }, weights::{GetDispatchInfo, Weight}, }; use frame_system::{self as system, ensure_signed}; -pub use weights::WeightInfo; pub use pallet::*; +use sp_runtime::{ + traits::{BadOrigin, One, Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{borrow::Borrow, marker::PhantomData, prelude::*}; +pub use weights::WeightInfo; /// Just a simple index for naming period tasks. pub type PeriodicIndex = u32; @@ -210,21 +216,21 @@ pub mod pallet { } #[pallet::genesis_config] - pub struct GenesisConfig; + pub struct GenesisConfig; - #[cfg(feature = "std")] - impl Default for GenesisConfig { - fn default() -> Self { + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { Self } - } + } - #[pallet::genesis_build] - impl<T: Config> GenesisBuild<T> for GenesisConfig { - fn build(&self) { + #[pallet::genesis_build] + impl<T: Config> GenesisBuild<T> for GenesisConfig { + fn build(&self) { StorageVersion::<T>::put(Releases::V2); - } - } + } + } #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { @@ -291,9 +297,9 @@ pub mod pallet { // - Its priority is `HARD_DEADLINE` // - It does not push the weight past the limit. // - It is the first item in the schedule - if s.priority <= schedule::HARD_DEADLINE - || cumulative_weight <= limit - || order == 0 + if s.priority <= schedule::HARD_DEADLINE || + cumulative_weight <= limit || + order == 0 { let r = s.call.clone().dispatch(s.origin.clone().into()); let maybe_id = s.maybe_id.clone(); @@ -497,20 +503,25 @@ impl<T: Config> Pallet<T> { StorageVersion::<T>::put(Releases::V2); Agenda::<T>::translate::< - Vec<Option<ScheduledV1<<T as Config>::Call, T::BlockNumber>>>, _ - >(|_, agenda| Some( - agenda - .into_iter() - .map(|schedule| schedule.map(|schedule| ScheduledV2 { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call, - maybe_periodic: schedule.maybe_periodic, - origin: system::RawOrigin::Root.into(), - _phantom: Default::default(), - })) - .collect::<Vec<_>>() - )); + Vec<Option<ScheduledV1<<T as Config>::Call, T::BlockNumber>>>, + _, + >(|_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + schedule.map(|schedule| ScheduledV2 { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: system::RawOrigin::Root.into(), + _phantom: Default::default(), + }) + }) + .collect::<Vec<_>>(), + ) + }); true } else { @@ -521,20 +532,25 @@ impl<T: Config> Pallet<T> { /// Helper to migrate scheduler when the pallet origin type has changed.
pub fn migrate_origin<OldOrigin: Into<T::PalletsOrigin> + codec::Decode>() { Agenda::<T>::translate::< - Vec<Option<Scheduled<<T as Config>::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, _ - >(|_, agenda| Some( - agenda - .into_iter() - .map(|schedule| schedule.map(|schedule| Scheduled { - maybe_id: schedule.maybe_id, - priority: schedule.priority, - call: schedule.call, - maybe_periodic: schedule.maybe_periodic, - origin: schedule.origin.into(), - _phantom: Default::default(), - })) - .collect::<Vec<_>>() - )); + Vec<Option<Scheduled<<T as Config>::Call, T::BlockNumber, OldOrigin, T::AccountId>>>, + _, + >(|_, agenda| { + Some( + agenda + .into_iter() + .map(|schedule| { + schedule.map(|schedule| Scheduled { + maybe_id: schedule.maybe_id, + priority: schedule.priority, + call: schedule.call, + maybe_periodic: schedule.maybe_periodic, + origin: schedule.origin.into(), + _phantom: Default::default(), + }) + }) + .collect::<Vec<_>>(), + ) + }); } fn resolve_time(when: DispatchTime<T::BlockNumber>) -> Result<T::BlockNumber, DispatchError> { @@ -548,7 +564,7 @@ impl<T: Config> Pallet<T> { }; if when <= now { - return Err(Error::<T>::TargetBlockNumberInPast.into()); + return Err(Error::<T>::TargetBlockNumberInPast.into()) } Ok(when) @@ -600,7 +616,7 @@ impl<T: Config> Pallet<T> { |s| -> Result>, DispatchError> { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if *o != s.origin { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } }; Ok(s.take()) @@ -625,7 +641,7 @@ impl<T: Config> Pallet<T> { let new_time = Self::resolve_time(new_time)?; if new_time == when { - return Err(Error::<T>::RescheduleNoChange.into()); + return Err(Error::<T>::RescheduleNoChange.into()) } Agenda::<T>::try_mutate(when, |agenda| -> DispatchResult { @@ -652,7 +668,7 @@ impl<T: Config> Pallet<T> { ) -> Result<TaskAddress<T::BlockNumber>, DispatchError> { // ensure id is unique if Lookup::<T>::contains_key(&id) { - return Err(Error::<T>::FailedToSchedule)?; + return Err(Error::<T>::FailedToSchedule)? } let when = Self::resolve_time(when)?; @@ -695,7 +711,7 @@ impl<T: Config> Pallet<T> { if let Some(s) = agenda.get_mut(i) { if let (Some(ref o), Some(ref s)) = (origin, s.borrow()) { if *o != s.origin { - return Err(BadOrigin.into()); + return Err(BadOrigin.into()) } } *s = None; @@ -722,7 +738,7 @@ impl<T: Config> Pallet<T> { let (when, index) = lookup.ok_or(Error::<T>::NotFound)?; if new_time == when { - return Err(Error::<T>::RescheduleNoChange.into()); + return Err(Error::<T>::RescheduleNoChange.into()) } Agenda::<T>::try_mutate(when, |agenda| -> DispatchResult { @@ -772,10 +788,7 @@ impl<T: Config> schedule::Anon<T::BlockNumber, <T as Config>::Call, T::PalletsOr } fn next_dispatch_time((when, index): Self::Address) -> Result<T::BlockNumber, ()> { - Agenda::<T>::get(when) - .get(index as usize) - .ok_or(()) - .map(|_| when) + Agenda::<T>::get(when).get(index as usize).ok_or(()).map(|_| when) } } @@ -867,7 +880,10 @@ mod tests { } #[pallet::call] - impl<T: Config> Pallet<T> where <T as frame_system::Config>::Origin: OriginTrait { + impl<T: Config> Pallet<T> + where + <T as frame_system::Config>::Origin: OriginTrait, + { #[pallet::weight(*weight)] pub fn log(origin: OriginFor<T>, i: u32, weight: Weight) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); @@ -878,7 +894,11 @@ mod tests { } #[pallet::weight(*weight)] - pub fn log_without_filter(origin: OriginFor<T>, i: u32, weight: Weight) -> DispatchResult { + pub fn log_without_filter( + origin: OriginFor<T>, + i: u32, + weight: Weight, + ) -> DispatchResult { Self::deposit_event(Event::Logged(i, weight)); LOG.with(|log| { log.borrow_mut().push((origin.caller().clone(), i)); @@ -986,9 +1006,7 @@ mod tests { fn basic_scheduling_works() { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log(42, 1000)); - assert!(!<Test as frame_system::Config>::BaseCallFilter::filter( - &call - )); + assert!(!<Test as frame_system::Config>::BaseCallFilter::filter(&call)); assert_ok!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(),
call)); run_to_block(3); assert!(logger::log().is_empty()); @@ -1004,9 +1022,7 @@ mod tests { new_test_ext().execute_with(|| { run_to_block(2); let call = Call::Logger(LoggerCall::log(42, 1000)); - assert!(!::BaseCallFilter::filter( - &call - )); + assert!(!::BaseCallFilter::filter(&call)); // This will schedule the call 3 blocks after the next block... so block 3 + 3 = 6 assert_ok!(Scheduler::do_schedule(DispatchTime::After(3), None, 127, root(), call)); run_to_block(5); @@ -1038,7 +1054,11 @@ mod tests { new_test_ext().execute_with(|| { // at #4, every 3 blocks, 3 times. assert_ok!(Scheduler::do_schedule( - DispatchTime::At(4), Some((3, 3)), 127, root(), Call::Logger(logger::Call::log(42, 1000)) + DispatchTime::At(4), + Some((3, 3)), + 127, + root(), + Call::Logger(logger::Call::log(42, 1000)) )); run_to_block(3); assert!(logger::log().is_empty()); @@ -1051,15 +1071,9 @@ mod tests { run_to_block(9); assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32)]); run_to_block(10); - assert_eq!( - logger::log(), - vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)] - ); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); run_to_block(100); - assert_eq!( - logger::log(), - vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)] - ); + assert_eq!(logger::log(), vec![(root(), 42u32), (root(), 42u32), (root(), 42u32)]); }); } @@ -1068,14 +1082,20 @@ mod tests { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log(42, 1000)); assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), (4, 0)); + assert_eq!( + Scheduler::do_schedule(DispatchTime::At(4), None, 127, root(), call).unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); assert_eq!(Scheduler::do_reschedule((4, 0), DispatchTime::At(6)).unwrap(), (6, 0)); - assert_noop!(Scheduler::do_reschedule((6, 0), DispatchTime::At(6)), Error::::RescheduleNoChange); + assert_noop!( + Scheduler::do_reschedule((6, 0), DispatchTime::At(6)), + Error::::RescheduleNoChange + ); run_to_block(4); assert!(logger::log().is_empty()); @@ -1093,16 +1113,31 @@ mod tests { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log(42, 1000)); assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), None, 127, root(), call - ).unwrap(), (4, 0)); + assert_eq!( + Scheduler::do_schedule_named( + 1u32.encode(), + DispatchTime::At(4), + None, + 127, + root(), + call + ) + .unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); - assert_noop!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), Error::::RescheduleNoChange); + assert_noop!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)), + Error::::RescheduleNoChange + ); run_to_block(4); assert!(logger::log().is_empty()); @@ -1120,15 +1155,30 @@ mod tests { new_test_ext().execute_with(|| { let call = Call::Logger(LoggerCall::log(42, 1000)); assert!(!::BaseCallFilter::filter(&call)); - assert_eq!(Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), Some((3, 3)), 127, root(), call - ).unwrap(), (4, 0)); + assert_eq!( + Scheduler::do_schedule_named( + 1u32.encode(), + DispatchTime::At(4), + Some((3, 3)), + 127, + 
root(), + call + ) + .unwrap(), + (4, 0) + ); run_to_block(3); assert!(logger::log().is_empty()); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), (5, 0)); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), (6, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(5)).unwrap(), + (5, 0) + ); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(6)).unwrap(), + (6, 0) + ); run_to_block(5); assert!(logger::log().is_empty()); @@ -1136,7 +1186,10 @@ mod tests { run_to_block(6); assert_eq!(logger::log(), vec![(root(), 42u32)]); - assert_eq!(Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), (10, 0)); + assert_eq!( + Scheduler::do_reschedule_named(1u32.encode(), DispatchTime::At(10)).unwrap(), + (10, 0) + ); run_to_block(9); assert_eq!(logger::log(), vec![(root(), 42u32)]); @@ -1157,11 +1210,22 @@ mod tests { new_test_ext().execute_with(|| { // at #4. Scheduler::do_schedule_named( - 1u32.encode(), DispatchTime::At(4), None, 127, root(), Call::Logger(LoggerCall::log(69, 1000)) - ).unwrap(); + 1u32.encode(), + DispatchTime::At(4), + None, + 127, + root(), + Call::Logger(LoggerCall::log(69, 1000)), + ) + .unwrap(); let i = Scheduler::do_schedule( - DispatchTime::At(4), None, 127, root(), Call::Logger(LoggerCall::log(42, 1000)) - ).unwrap(); + DispatchTime::At(4), + None, + 127, + root(), + Call::Logger(LoggerCall::log(42, 1000)), + ) + .unwrap(); run_to_block(3); assert!(logger::log().is_empty()); assert_ok!(Scheduler::do_cancel_named(None, 1u32.encode())); @@ -1315,10 +1379,7 @@ mod tests { assert_eq!(logger::log(), vec![(root(), 2600u32)]); // 69 and 42 fit together run_to_block(5); - assert_eq!( - logger::log(), - vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)] - ); + assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); }); } @@ -1372,9 +1433,9 @@ mod tests { let call_weight = MaximumSchedulerWeight::get() / 2; assert_eq!( actual_weight, - call_weight - + base_weight + base_multiplier - + named_multiplier + periodic_multiplier + call_weight + + base_weight + base_multiplier + + named_multiplier + periodic_multiplier ); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -1385,10 +1446,7 @@ mod tests { actual_weight, call_weight + base_weight + base_multiplier * 2 + periodic_multiplier ); - assert_eq!( - logger::log(), - vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)] - ); + assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only let actual_weight = Scheduler::on_initialize(3); @@ -1399,12 +1457,7 @@ mod tests { ); assert_eq!( logger::log(), - vec![ - (root(), 2600u32), - (root(), 69u32), - (root(), 42u32), - (root(), 3u32) - ] + vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32), (root(), 3u32)] ); // Will contain none @@ -1488,10 +1541,7 @@ mod tests { // Scheduled calls are in the agenda. 
assert_eq!(Agenda::<Test>::get(4).len(), 2); assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named( - system::RawOrigin::Signed(1).into(), - 1u32.encode() - )); + assert_ok!(Scheduler::cancel_named(system::RawOrigin::Signed(1).into(), 1u32.encode())); assert_ok!(Scheduler::cancel(system::RawOrigin::Signed(1).into(), 4, 1)); // Scheduled calls are made NONE, so should not affect state run_to_block(100); @@ -1550,18 +1600,12 @@ Scheduler::cancel_named(system::RawOrigin::Signed(2).into(), 1u32.encode()), BadOrigin ); - assert_noop!( - Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), - BadOrigin - ); + assert_noop!(Scheduler::cancel(system::RawOrigin::Signed(2).into(), 4, 1), BadOrigin); assert_noop!( Scheduler::cancel_named(system::RawOrigin::Root.into(), 1u32.encode()), BadOrigin ); - assert_noop!( - Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), - BadOrigin - ); + assert_noop!(Scheduler::cancel(system::RawOrigin::Root.into(), 4, 1), BadOrigin); run_to_block(5); assert_eq!( logger::log(), diff --git a/substrate/frame/scheduler/src/weights.rs b/substrate/frame/scheduler/src/weights.rs index 648652428cbb82ed15858e8c9e45dbdab3c6e6cf..854cd5a525ceee72f3b80087a616e33e05124f21 100644 --- a/substrate/frame/scheduler/src/weights.rs +++ b/substrate/frame/scheduler/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/scored-pool/src/lib.rs b/substrate/frame/scored-pool/src/lib.rs index 5892862b4307e04dced3920ed66e84d35e0eff8e..fc25004eda68d19f678e6783a4f802ce5c91121c 100644 --- a/substrate/frame/scored-pool/src/lib.rs +++ b/substrate/frame/scored-pool/src/lib.rs @@ -91,18 +91,16 @@ mod mock; mod tests; use codec::FullCodec; -use sp_std::{ - fmt::Debug, - prelude::*, -}; use frame_support::{ ensure, - traits::{ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency}, + traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency}, }; -use sp_runtime::traits::{AtLeast32Bit, Zero, StaticLookup}; pub use pallet::*; +use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero}; +use sp_std::{fmt::Debug, prelude::*}; -type BalanceOf<T, I> = <<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; +type BalanceOf<T, I> = + <<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; type PoolT<T, I> = Vec<(<T as frame_system::Config>::AccountId, Option<<T as Config<I>>::Score>)>; /// The enum is supplied when refreshing the members set. @@ -117,10 +115,10 @@ enum ChangeReceiver { #[frame_support::pallet] pub mod pallet { + use super::*; use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight}; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::traits::MaybeSerializeDeserialize; - use super::*; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -132,8 +130,13 @@ type Currency: Currency<Self::AccountId> + ReservableCurrency<Self::AccountId>; /// The score attributed to a member or candidate. - type Score: - AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug; + type Score: AtLeast32Bit + + Clone + + Copy + + Default + + FullCodec + + MaybeSerializeDeserialize + + Debug; /// The overarching event type. type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>; @@ -209,22 +212,19 @@ /// `T::AccountId`, but by `T::Score` instead).
#[pallet::storage] #[pallet::getter(fn candidate_exists)] - pub(crate) type CandidateExists<T: Config<I>, I: 'static = ()> = StorageMap< - _, - Twox64Concat, T::AccountId, - bool, - ValueQuery, - >; + pub(crate) type CandidateExists<T: Config<I>, I: 'static = ()> = + StorageMap<_, Twox64Concat, T::AccountId, bool, ValueQuery>; /// The current membership, stored as an ordered Vec. #[pallet::storage] #[pallet::getter(fn members)] - pub(crate) type Members<T: Config<I>, I: 'static = ()> = StorageValue<_, Vec<T::AccountId>, ValueQuery>; + pub(crate) type Members<T: Config<I>, I: 'static = ()> = + StorageValue<_, Vec<T::AccountId>, ValueQuery>; /// Size of the `Members` set. #[pallet::storage] #[pallet::getter(fn member_count)] - pub(crate) type MemberCount<T, I = ()> = StorageValue<_, u32, ValueQuery>; + pub(crate) type MemberCount<T, I = ()> = StorageValue<_, u32, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig<T: Config<I>, I: 'static = ()> { @@ -235,10 +235,7 @@ #[cfg(feature = "std")] impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> { fn default() -> Self { - Self { - pool: Default::default(), - member_count: Default::default(), - } + Self { pool: Default::default(), member_count: Default::default() } } } @@ -249,19 +246,15 @@ // reserve balance for each candidate in the pool. // panicking here is ok, since this just happens one time, pre-genesis. - pool - .iter() - .for_each(|(who, _)| { - T::Currency::reserve(&who, T::CandidateDeposit::get()) - .expect("balance too low to create candidacy"); - <CandidateExists<T, I>>::insert(who, true); - }); + pool.iter().for_each(|(who, _)| { + T::Currency::reserve(&who, T::CandidateDeposit::get()) + .expect("balance too low to create candidacy"); + <CandidateExists<T, I>>::insert(who, true); + }); // Sorts the `Pool` by score in a descending order. Entities which // have a score of `None` are sorted to the beginning of the vec. - pool.sort_by_key(|(_, maybe_score)| - Reverse(maybe_score.unwrap_or_default()) - ); + pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default())); <MemberCount<T, I>>::put(self.member_count); <Pool<T, I>>::put(&pool); @@ -324,10 +317,7 @@ /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. #[pallet::weight(0)] - pub fn withdraw_candidacy( - origin: OriginFor<T>, - index: u32 - ) -> DispatchResult { + pub fn withdraw_candidacy(origin: OriginFor<T>, index: u32) -> DispatchResult { let who = ensure_signed(origin)?; let pool = <Pool<T, I>>::get(); @@ -348,7 +338,7 @@ pub fn kick( origin: OriginFor<T>, dest: <T::Lookup as StaticLookup>::Source, - index: u32 + index: u32, ) -> DispatchResult { T::KickOrigin::ensure_origin(origin)?; @@ -373,7 +363,7 @@ origin: OriginFor<T>, dest: <T::Lookup as StaticLookup>::Source, index: u32, - score: T::Score + score: T::Score, ) -> DispatchResult { T::ScoreOrigin::ensure_origin(origin)?; @@ -390,10 +380,9 @@ // where we can insert while maintaining order. let item = (who, Some(score.clone())); let location = pool - .binary_search_by_key( - &Reverse(score), - |(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()) - ) + .binary_search_by_key(&Reverse(score), |(_, maybe_score)| { + Reverse(maybe_score.unwrap_or_default()) + }) .unwrap_or_else(|l| l); pool.insert(location, item); @@ -418,16 +407,12 @@ } impl<T: Config<I>, I: 'static> Pallet<T, I> { - /// Fetches the `MemberCount` highest scoring members from /// `Pool` and puts them into `Members`. /// /// The `notify` parameter is used to deduce which associated /// type function to invoke at the end of the method.
- fn refresh_members( - pool: PoolT, - notify: ChangeReceiver - ) { + fn refresh_members(pool: PoolT, notify: ChangeReceiver) { let count = MemberCount::::get(); let mut new_members: Vec = pool @@ -445,10 +430,7 @@ impl, I: 'static> Pallet { ChangeReceiver::MembershipInitialized => T::MembershipInitialized::initialize_members(&new_members), ChangeReceiver::MembershipChanged => - T::MembershipChanged::set_members_sorted( - &new_members[..], - &old_members[..], - ), + T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]), } } @@ -459,7 +441,7 @@ impl, I: 'static> Pallet { fn remove_member( mut pool: PoolT, remove: T::AccountId, - index: u32 + index: u32, ) -> Result<(), Error> { // all callers of this function in this pallet also check // the index for validity before calling this function. @@ -486,11 +468,7 @@ impl, I: 'static> Pallet { /// Checks if `index` is a valid number and if the element found /// at `index` in `Pool` is equal to `who`. - fn ensure_index( - pool: &PoolT, - who: &T::AccountId, - index: u32 - ) -> Result<(), Error> { + fn ensure_index(pool: &PoolT, who: &T::AccountId, index: u32) -> Result<(), Error> { ensure!(index < pool.len() as u32, Error::::InvalidIndex); let (index_who, _index_score) = &pool[index as usize]; diff --git a/substrate/frame/scored-pool/src/mock.rs b/substrate/frame/scored-pool/src/mock.rs index 30dc48dd19d0aa4ca5e70c48fe331dee6a34d8b2..80ded36fbf0ab3a2659e17d979abba6e062d46d8 100644 --- a/substrate/frame/scored-pool/src/mock.rs +++ b/substrate/frame/scored-pool/src/mock.rs @@ -20,13 +20,14 @@ use super::*; use crate as pallet_scored_pool; -use std::cell::RefCell; -use frame_support::{parameter_types, ord_parameter_types, traits::GenesisBuild}; +use frame_support::{ord_parameter_types, parameter_types, traits::GenesisBuild}; +use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, }; -use frame_system::EnsureSignedBy; +use std::cell::RefCell; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -145,32 +146,26 @@ pub fn new_test_ext() -> sp_io::TestExternalities { (40, 500_000), (99, 1), ], - }.assimilate_storage(&mut t).unwrap(); - pallet_scored_pool::GenesisConfig::{ - pool: vec![ - (5, None), - (10, Some(1)), - (20, Some(2)), - (31, Some(2)), - (40, Some(3)), - ], + } + .assimilate_storage(&mut t) + .unwrap(); + pallet_scored_pool::GenesisConfig:: { + pool: vec![(5, None), (10, Some(1)), (20, Some(2)), (31, Some(2)), (40, Some(3))], member_count: 2, - .. Default::default() - }.assimilate_storage(&mut t).unwrap(); + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); t.into() } /// Fetch an entity from the pool, if existent. pub fn fetch_from_pool(who: u64) -> Option<(u64, Option)> { - >::pool() - .into_iter() - .find(|item| item.0 == who) + >::pool().into_iter().find(|item| item.0 == who) } /// Find an entity in the pool. /// Returns its position in the `Pool` vec, if existent. 
pub fn find_in_pool(who: u64) -> Option { - >::pool() - .into_iter() - .position(|item| item.0 == who) + >::pool().into_iter().position(|item| item.0 == who) } diff --git a/substrate/frame/scored-pool/src/tests.rs b/substrate/frame/scored-pool/src/tests.rs index 4a3b8384b744f655dbd6b8e0deaa2f5968eb96a0..0503e308e76a5f0406ae07db22915a7cb0878f90 100644 --- a/substrate/frame/scored-pool/src/tests.rs +++ b/substrate/frame/scored-pool/src/tests.rs @@ -20,7 +20,7 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop, traits::OnInitialize}; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; use sp_runtime::traits::BadOrigin; type ScoredPool = Pallet; @@ -142,14 +142,12 @@ fn unscored_entities_must_not_be_used_for_filling_members() { // when // we remove every scored member - ScoredPool::pool() - .into_iter() - .for_each(|(who, score)| { - if let Some(_) = score { - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); - } - }); + ScoredPool::pool().into_iter().for_each(|(who, score)| { + if let Some(_) = score { + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); + } + }); // then // the `None` candidates should not have been filled in @@ -201,7 +199,10 @@ fn withdraw_candidacy_must_only_work_for_members() { new_test_ext().execute_with(|| { let who = 77; let index = 0; - assert_noop!( ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex); + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), index), + Error::::WrongAccountIndex + ); }); } @@ -210,9 +211,18 @@ fn oob_index_should_abort() { new_test_ext().execute_with(|| { let who = 40; let oob_index = ScoredPool::pool().len() as u32; - assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), Error::::InvalidIndex); - assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), Error::::InvalidIndex); - assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), Error::::InvalidIndex); + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), + Error::::InvalidIndex + ); + assert_noop!( + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), + Error::::InvalidIndex + ); + assert_noop!( + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), + Error::::InvalidIndex + ); }); } @@ -221,9 +231,18 @@ fn index_mismatches_should_abort() { new_test_ext().execute_with(|| { let who = 40; let index = 3; - assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex); - assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), Error::::WrongAccountIndex); - assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), Error::::WrongAccountIndex); + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), index), + Error::::WrongAccountIndex + ); + assert_noop!( + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), + Error::::WrongAccountIndex + ); + assert_noop!( + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), + Error::::WrongAccountIndex + ); }); } diff --git a/substrate/frame/session/benchmarking/src/lib.rs b/substrate/frame/session/benchmarking/src/lib.rs index 
d9a50b431f2e7bb0e5a51a5f3b609fe9bd60dcfb..117ef07d60a2b7a40b983120786f2fd9d26cb677 100644 --- a/substrate/frame/session/benchmarking/src/lib.rs +++ b/substrate/frame/session/benchmarking/src/lib.rs @@ -22,8 +22,7 @@ mod mock; -use sp_std::prelude::*; -use sp_std::vec; +use sp_std::{prelude::*, vec}; use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_support::{ @@ -41,7 +40,10 @@ use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; pub struct Pallet(pallet_session::Module); -pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config {} +pub trait Config: + pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config +{ +} impl OnInitialize for Pallet { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { @@ -120,20 +122,12 @@ benchmarks! { /// proof for the first authority and returns its key and the proof. fn check_membership_proof_setup( n: u32, -) -> ( - (sp_runtime::KeyTypeId, &'static [u8; 32]), - sp_session::MembershipProof, -) { +) -> ((sp_runtime::KeyTypeId, &'static [u8; 32]), sp_session::MembershipProof) { pallet_staking::ValidatorCount::::put(n); // create validators and set random session keys - for (n, who) in create_validators::(n, 1000) - .unwrap() - .into_iter() - .enumerate() - { - use rand::RngCore; - use rand::SeedableRng; + for (n, who) in create_validators::(n, 1000).unwrap().into_iter().enumerate() { + use rand::{RngCore, SeedableRng}; let validator = T::Lookup::lookup(who).unwrap(); let controller = pallet_staking::Pallet::::bonded(validator).unwrap(); @@ -168,9 +162,4 @@ fn check_membership_proof_setup( (key, Historical::::prove(key).unwrap()) } -impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(), - crate::mock::Test, - extra = false, -); +impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false,); diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index a3f9b6b447c38c047434022ecf8e78635bde57ba..bd61acb9de18069960eb75bd65af1b7c0866d793 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -19,9 +19,9 @@ #![cfg(test)] -use sp_runtime::traits::IdentityLookup; use frame_election_provider_support::onchain; use frame_support::parameter_types; +use sp_runtime::traits::IdentityLookup; type AccountId = u64; type AccountIndex = u32; @@ -114,7 +114,8 @@ impl pallet_session::SessionHandler for TestSessionHandler { _: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)], - ) {} + ) { + } fn on_disabled(_: usize) {} } diff --git a/substrate/frame/session/src/historical/mod.rs b/substrate/frame/session/src/historical/mod.rs index 3cfcbf98bf38cf2accb9508eb66cbbc32184a388..c9b13e3c7f262c59890002a2430e983997fed323 100644 --- a/substrate/frame/session/src/historical/mod.rs +++ b/substrate/frame/session/src/historical/mod.rs @@ -26,22 +26,27 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. 
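The import and layout churn below does not change that contract: the module's external surface is its `KeyOwnerProofSystem` implementation. A minimal sketch of proving and later checking membership, mirroring the benchmarking usage further up (`Historical` and `DUMMY` are the aliases the module's own tests define; `encoded_key` stands in for a real SCALE-encoded session key):

use frame_support::traits::KeyOwnerProofSystem;
use sp_runtime::key_types::DUMMY;

// Prove that a raw session key belongs to a validator of the current
// session; the proof carries the trie nodes needed for verification.
let key = (DUMMY, &encoded_key[..]);
let proof = Historical::prove(key).expect("key belongs to a current validator");

// Later, e.g. inside an offence report, check the proof against the
// session root kept in `HistoricalSessions`.
let (validator_id, full_identification) =
    Historical::check_proof(key, proof).expect("proof matches the stored root");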
-use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::KeyTypeId; -use sp_runtime::traits::{Convert, OpaqueKeys}; -use sp_session::{MembershipProof, ValidatorCount}; +use super::{Module as SessionModule, SessionIndex}; +use codec::{Decode, Encode}; use frame_support::{ - decl_module, decl_storage, Parameter, print, + decl_module, decl_storage, print, traits::{ValidatorSet, ValidatorSetWithIdentification}, + Parameter, +}; +use sp_runtime::{ + traits::{Convert, OpaqueKeys}, + KeyTypeId, +}; +use sp_session::{MembershipProof, ValidatorCount}; +use sp_std::prelude::*; +use sp_trie::{ + trie_types::{TrieDB, TrieDBMut}, + MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; -use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; -use sp_trie::trie_types::{TrieDBMut, TrieDB}; -use super::{SessionIndex, Module as SessionModule}; -mod shared; pub mod offchain; pub mod onchain; +mod shared; /// Config necessary for the historical module. pub trait Config: super::Config { @@ -165,7 +170,7 @@ impl> NoteHi Err(reason) => { print("Failed to generate historical ancestry-inclusion proof."); print(reason); - } + }, }; } else { let previous_index = new_index.saturating_sub(1); @@ -201,7 +206,8 @@ where } /// A tuple of the validator's ID and their full identification. -pub type IdentificationTuple = (::ValidatorId, ::FullIdentification); +pub type IdentificationTuple = + (::ValidatorId, ::FullIdentification); /// A trie instance for checking and generating proofs. pub struct ProvingTrie { @@ -211,7 +217,8 @@ pub struct ProvingTrie { impl ProvingTrie { fn generate_for(validators: I) -> Result - where I: IntoIterator + where + I: IntoIterator, { let mut db = MemoryDB::default(); let mut root = Default::default(); @@ -230,23 +237,20 @@ impl ProvingTrie { // map each key to the owner index. for key_id in T::Keys::key_ids() { let key = keys.get_raw(*key_id); - let res = (key_id, key).using_encoded(|k| - i.using_encoded(|v| trie.insert(k, v)) - ); + let res = + (key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v))); let _ = res.map_err(|_| "failed to insert into trie")?; } // map each owner index to the full identification. - let _ = i.using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) + let _ = i + .using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) .map_err(|_| "failed to insert into trie")?; } } - Ok(ProvingTrie { - db, - root, - }) + Ok(ProvingTrie { db, root }) } fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { @@ -257,10 +261,7 @@ impl ProvingTrie { HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); } - ProvingTrie { - db: memory_db, - root, - } + ProvingTrie { db: memory_db, root } } /// Prove the full verification data for a given key and key ID. @@ -291,11 +292,13 @@ impl ProvingTrie { // nodes within the current `MemoryDB` are insufficient to query the item. fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { let trie = TrieDB::new(&self.db, &self.root).ok()?; - let val_idx = (key_id, key_data).using_encoded(|s| trie.get(s)) + let val_idx = (key_id, key_data) + .using_encoded(|s| trie.get(s)) .ok()? .and_then(|raw| u32::decode(&mut &*raw).ok())?; - val_idx.using_encoded(|s| trie.get(s)) + val_idx + .using_encoded(|s| trie.get(s)) .ok()? 
.and_then(|raw| >::decode(&mut &*raw).ok()) } @@ -322,12 +325,11 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyT let trie = ProvingTrie::::generate_for(validators).ok()?; let (id, data) = key; - trie.prove(id, data.as_ref()) - .map(|trie_nodes| MembershipProof { - session, - trie_nodes, - validator_count: count, - }) + trie.prove(id, data.as_ref()).map(|trie_nodes| MembershipProof { + session, + trie_nodes, + validator_count: count, + }) } fn check_proof(key: (KeyTypeId, D), proof: Self::Proof) -> Option> { @@ -339,7 +341,7 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyT let count = >::validators().len() as ValidatorCount; if count != proof.validator_count { - return None; + return None } Some((owner, id)) @@ -349,7 +351,7 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyT let (root, count) = >::get(&proof.session)?; if count != proof.validator_count { - return None; + return None } let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); @@ -361,22 +363,22 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyT #[cfg(test)] pub(crate) mod tests { use super::*; - use sp_runtime::key_types::DUMMY; - use sp_runtime::testing::UintAuthorityId; use crate::mock::{ - NEXT_VALIDATORS, force_new_session, - set_next_validators, Test, System, Session, + force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; - use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; - use frame_support::BasicExternalities; + use frame_support::{ + traits::{KeyOwnerProofSystem, OnInitialize}, + BasicExternalities, + }; + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; type Historical = Module; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NEXT_VALIDATORS.with(|l| + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ); + }); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); @@ -430,7 +432,6 @@ pub(crate) mod tests { System::set_block_number(i); Session::on_initialize(i); - } assert_eq!(StoredRange::get(), Some((0, 100))); @@ -461,7 +462,6 @@ pub(crate) mod tests { System::set_block_number(i); Session::on_initialize(i); - } assert_eq!(StoredRange::get(), Some((100, 200))); diff --git a/substrate/frame/session/src/historical/offchain.rs b/substrate/frame/session/src/historical/offchain.rs index 68cc78029f12cf9bd2f01e7a460694ddb3207492..8583c2bb439bed6875d4c88a537dc84f804ecc67 100644 --- a/substrate/frame/session/src/historical/offchain.rs +++ b/substrate/frame/session/src/historical/offchain.rs @@ -27,17 +27,18 @@ use sp_runtime::{ offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef}, - KeyTypeId + KeyTypeId, }; use sp_session::MembershipProof; -use super::super::{Pallet as SessionModule, SessionIndex}; -use super::{IdentificationTuple, ProvingTrie, Config}; +use super::{ + super::{Pallet as SessionModule, SessionIndex}, + Config, IdentificationTuple, ProvingTrie, +}; use super::shared; use sp_std::prelude::*; - /// A set of validators, which was used for a fixed session index. 
struct ValidatorSet { validator_set: Vec>, @@ -87,15 +88,13 @@ pub fn prove_session_membership>( let trie = ProvingTrie::::generate_for(validators.into_iter()).ok()?; let (id, data) = session_key; - trie.prove(id, data.as_ref()) - .map(|trie_nodes| MembershipProof { - session: session_index, - trie_nodes, - validator_count: count, - }) + trie.prove(id, data.as_ref()).map(|trie_nodes| MembershipProof { + session: session_index, + trie_nodes, + validator_count: count, + }) } - /// Attempt to prune anything that is older than `first_to_keep` session index. /// /// Due to re-organisation it could be that the `first_to_keep` might be less @@ -104,18 +103,20 @@ pub fn prove_session_membership>( pub fn prune_older_than(first_to_keep: SessionIndex) { let derived_key = shared::LAST_PRUNE.to_vec(); let entry = StorageValueRef::persistent(derived_key.as_ref()); - match entry.mutate(|current: Result, StorageRetrievalError>| -> Result<_, ()> { - match current { - Ok(Some(current)) if current < first_to_keep => Ok(first_to_keep), - // do not move the cursor, if the new one would be behind ours - Ok(Some(current)) => Ok(current), - Ok(None) => Ok(first_to_keep), - // if the storage contains undecodable data, overwrite with current anyways - // which might leak some entries being never purged, but that is acceptable - // in this context - Err(_) => Ok(first_to_keep), - } - }) { + match entry.mutate( + |current: Result, StorageRetrievalError>| -> Result<_, ()> { + match current { + Ok(Some(current)) if current < first_to_keep => Ok(first_to_keep), + // do not move the cursor, if the new one would be behind ours + Ok(Some(current)) => Ok(current), + Ok(None) => Ok(first_to_keep), + // if the storage contains undecodable data, overwrite with current anyways + // which might leak some entries being never purged, but that is acceptable + // in this context + Err(_) => Ok(first_to_keep), + } + }, + ) { Ok(new_value) => { // on a re-org this is not necessarily true, with the above they might be equal if new_value < first_to_keep { @@ -124,9 +125,9 @@ pub fn prune_older_than(first_to_keep: SessionIndex) { let _ = StorageValueRef::persistent(derived_key.as_ref()).clear(); } } - } - Err(MutateStorageError::ConcurrentModification(_)) => {} - Err(MutateStorageError::ValueFunctionFailed(_)) => {} + }, + Err(MutateStorageError::ConcurrentModification(_)) => {}, + Err(MutateStorageError::ValueFunctionFailed(_)) => {}, } } @@ -141,23 +142,22 @@ pub fn keep_newest(n_to_keep: usize) { #[cfg(test)] mod tests { - use super::super::{onchain, Module}; - use super::*; + use super::{ + super::{onchain, Module}, + *, + }; use crate::mock::{ force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; use codec::Encode; use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; - use sp_core::crypto::key_types::DUMMY; - use sp_core::offchain::{ - testing::TestOffchainExt, - OffchainDbExt, - OffchainWorkerExt, - StorageKind, + use sp_core::{ + crypto::key_types::DUMMY, + offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind}, }; - use sp_runtime::testing::UintAuthorityId; use frame_support::BasicExternalities; + use sp_runtime::testing::UintAuthorityId; type Historical = Module; @@ -166,16 +166,16 @@ mod tests { .build_storage::() .expect("Failed to create test externalities."); - let keys: Vec<_> = NEXT_VALIDATORS.with(|l| + let keys: Vec<_> = NEXT_VALIDATORS.with(|l| { l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ); + }); 
BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); } }); - crate::GenesisConfig::{ keys }.assimilate_storage(&mut t).unwrap(); + crate::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); let mut ext = sp_io::TestExternalities::new(t); @@ -193,13 +193,13 @@ mod tests { #[test] fn encode_decode_roundtrip() { + use super::super::{super::Config as SessionConfig, Config as HistoricalConfig}; use codec::{Decode, Encode}; - use super::super::super::Config as SessionConfig; - use super::super::Config as HistoricalConfig; let sample = ( - 22u32 as ::ValidatorId, - 7_777_777 as ::FullIdentification); + 22u32 as ::ValidatorId, + 7_777_777 as ::FullIdentification, + ); let encoded = sample.encode(); let decoded = Decode::decode(&mut encoded.as_slice()).expect("Must decode"); @@ -210,7 +210,7 @@ mod tests { fn onchain_to_offchain() { let mut ext = new_test_ext(); - const DATA: &[u8] = &[7,8,9,10,11]; + const DATA: &[u8] = &[7, 8, 9, 10, 11]; ext.execute_with(|| { b"alphaomega"[..].using_encoded(|key| sp_io::offchain_index::set(key, DATA)); }); @@ -218,15 +218,13 @@ mod tests { ext.persist_offchain_overlay(); ext.execute_with(|| { - let data = - b"alphaomega"[..].using_encoded(|key| { + let data = b"alphaomega"[..].using_encoded(|key| { sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, key) }); assert_eq!(data, Some(DATA.to_vec())); }); } - #[test] fn historical_proof_offchain() { let mut ext = new_test_ext(); @@ -251,8 +249,6 @@ mod tests { ext.persist_offchain_overlay(); ext.execute_with(|| { - - System::set_block_number(2); Session::on_initialize(2); assert_eq!(>::current_index(), 2); diff --git a/substrate/frame/session/src/historical/onchain.rs b/substrate/frame/session/src/historical/onchain.rs index 8fe63a79e1c59c664dbceb7e121792524c3e7712..514e343f4e0f69f9b46be05a49c101bd0c289936 100644 --- a/substrate/frame/session/src/historical/onchain.rs +++ b/substrate/frame/session/src/historical/onchain.rs @@ -20,9 +20,10 @@ use codec::Encode; use sp_runtime::traits::Convert; -use super::super::Config as SessionConfig; -use super::super::{Pallet as SessionModule, SessionIndex}; -use super::Config as HistoricalConfig; +use super::{ + super::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}, + Config as HistoricalConfig, +}; use super::shared; use sp_std::prelude::*; diff --git a/substrate/frame/session/src/historical/shared.rs b/substrate/frame/session/src/historical/shared.rs index b054854d88fe8b2a3da138740e5bb0a612f3a904..e801aa80eef4c9197ae87725d72deeeb55f254da 100644 --- a/substrate/frame/session/src/historical/shared.rs +++ b/substrate/frame/session/src/historical/shared.rs @@ -18,10 +18,9 @@ //! Shared logic between on-chain and off-chain components used for slashing using an off-chain //! worker. 
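One concrete data point for the `derive_key` helper reflowed just below: the derived offchain storage key is simply the prefix, a `/` separator, and the SCALE-encoded session index. For example, from inside the module (the helper is `pub(super)`; the index 3 is illustrative):

// b"session_historical" ++ b"/" ++ 3u32.encode(), where SCALE encodes a u32
// as four little-endian bytes:
let key = derive_key(PREFIX, 3);
assert_eq!(&key[..19], &b"session_historical/"[..]);
assert_eq!(&key[19..], &[3u8, 0, 0, 0]);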
- use super::SessionIndex; -use sp_std::prelude::*; use codec::Encode; +use sp_std::prelude::*; pub(super) const PREFIX: &[u8] = b"session_historical"; pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune"; @@ -30,10 +29,11 @@ pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune"; pub(super) fn derive_key>(prefix: P, session_index: SessionIndex) -> Vec { let prefix: &[u8] = prefix.as_ref(); session_index.using_encoded(|encoded_session_index| { - prefix.into_iter() + prefix + .into_iter() .chain(b"/".into_iter()) .chain(encoded_session_index.into_iter()) .copied() .collect::>() }) -} \ No newline at end of file +} diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 5095ed0154657214deceeb710967b72db853191f..cdeceb1ef53d2271ecfbf27f36dbe7a79004216f 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -106,31 +106,37 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "historical")] +pub mod historical; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -#[cfg(feature = "historical")] -pub mod historical; pub mod weights; -use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; use codec::Decode; -use sp_runtime::{ - traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, - KeyTypeId, Perbill, Permill, RuntimeAppPublic, -}; -use sp_staking::SessionIndex; use frame_support::{ - ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId, Parameter, + decl_error, decl_event, decl_module, decl_storage, + dispatch::{self, DispatchError, DispatchResult}, + ensure, traits::{ - Get, FindAuthor, ValidatorRegistration, EstimateNextSessionRotation, EstimateNextNewSession, - OneSessionHandler, ValidatorSet, + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, + ValidatorRegistration, ValidatorSet, }, - dispatch::{self, DispatchResult, DispatchError}, weights::Weight, + ConsensusEngineId, Parameter, }; use frame_system::ensure_signed; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, + KeyTypeId, Perbill, Permill, RuntimeAppPublic, +}; +use sp_staking::SessionIndex; +use sp_std::{ + marker::PhantomData, + ops::{Rem, Sub}, + prelude::*, +}; pub use weights::WeightInfo; /// Decides whether the session should be ended. @@ -147,10 +153,10 @@ pub trait ShouldEndSession { pub struct PeriodicSessions(PhantomData<(Period, Offset)>); impl< - BlockNumber: Rem + Sub + Zero + PartialOrd, - Period: Get, - Offset: Get, -> ShouldEndSession for PeriodicSessions + BlockNumber: Rem + Sub + Zero + PartialOrd, + Period: Get, + Offset: Get, + > ShouldEndSession for PeriodicSessions { fn should_end_session(now: BlockNumber) -> bool { let offset = Offset::get(); @@ -159,10 +165,10 @@ impl< } impl< - BlockNumber: AtLeast32BitUnsigned + Clone, - Period: Get, - Offset: Get -> EstimateNextSessionRotation for PeriodicSessions + BlockNumber: AtLeast32BitUnsigned + Clone, + Period: Get, + Offset: Get, + > EstimateNextSessionRotation for PeriodicSessions { fn average_session_length() -> BlockNumber { Period::get() @@ -177,15 +183,9 @@ impl< // (0% is never returned). 
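// Worked example of the arithmetic below (illustrative parameters): with
// `Period = 10` and `Offset = 3`, sessions end at blocks 3, 13, 23, and so on.
// At `now = 14`: current = (14 - 3) % 10 + 1 = 2, so progress = 2/10 = 20%.
// At `now = 2`, the last block before the first rotation, the `else` branch
// reports (2 + 1)/3 = 100%: the offset interval counts as its own session.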
let progress = if now >= offset { let current = (now - offset) % period.clone() + One::one(); - Some(Permill::from_rational( - current.clone(), - period.clone(), - )) + Some(Permill::from_rational(current.clone(), period.clone())) } else { - Some(Permill::from_rational( - now + One::one(), - offset, - )) + Some(Permill::from_rational(now + One::one(), offset)) }; // Weight note: `estimate_current_session_progress` has no storage reads and trivial @@ -257,7 +257,9 @@ pub trait SessionManager { } impl SessionManager for () { - fn new_session(_: SessionIndex) -> Option> { None } + fn new_session(_: SessionIndex) -> Option> { + None + } fn start_session(_: SessionIndex) {} fn end_session(_: SessionIndex) {} } @@ -591,9 +593,8 @@ impl Module { // Get queued session keys and validators. let session_keys = >::get(); - let validators = session_keys.iter() - .map(|(validator, _)| validator.clone()) - .collect::>(); + let validators = + session_keys.iter().map(|(validator, _)| validator.clone()).collect::>(); >::put(&validators); if changed { @@ -609,16 +610,15 @@ impl Module { // Get next validator set. let maybe_next_validators = T::SessionManager::new_session(session_index + 1); - let (next_validators, next_identities_changed) - = if let Some(validators) = maybe_next_validators - { - // NOTE: as per the documentation on `OnSessionEnding`, we consider - // the validator set as having changed even if the validators are the - // same as before, as underlying economic conditions may have changed. - (validators, true) - } else { - (>::get(), false) - }; + let (next_validators, next_identities_changed) = + if let Some(validators) = maybe_next_validators { + // NOTE: as per the documentation on `OnSessionEnding`, we consider + // the validator set as having changed even if the validators are the + // same as before, as underlying economic conditions may have changed. + (validators, true) + } else { + (>::get(), false) + }; // Queue next session keys. let (queued_amalgamated, next_changed) = { @@ -628,7 +628,9 @@ impl Module { let mut now_session_keys = session_keys.iter(); let mut check_next_changed = |keys: &T::Keys| { - if changed { return } + if changed { + return + } // since a new validator set always leads to `changed` starting // as true, we can ensure that `now_session_keys` and `next_validators` // have the same length. this function is called once per iteration. @@ -639,7 +641,8 @@ impl Module { } } }; - let queued_amalgamated = next_validators.into_iter() + let queued_amalgamated = next_validators + .into_iter() .map(|a| { let k = Self::load_keys(&a).unwrap_or_default(); check_next_changed(&k); @@ -657,11 +660,7 @@ impl Module { Self::deposit_event(Event::NewSession(session_index)); // Tell everyone about the new session keys. - T::SessionHandler::on_new_session::( - changed, - &session_keys, - &queued_amalgamated, - ); + T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); } /// Disable the validator of index `i`. @@ -695,7 +694,11 @@ impl Module { /// session is already disabled. /// If used with the staking module it allows to force a new era in such case. pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { - Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) + Self::validators() + .iter() + .position(|i| i == c) + .map(Self::disable_index) + .ok_or(()) } /// Upgrade the key type from some old type to a new type. 
Supports adding @@ -713,7 +716,8 @@ impl Module { /// it's recommended to initialize the keys to a (unique) dummy value with the expectation /// that all validators should invoke `set_keys` before those keys are actually /// required. - pub fn upgrade_keys(upgrade: F) where + pub fn upgrade_keys(upgrade: F) + where Old: OpaqueKeys + Member + Decode, F: Fn(T::ValidatorId, Old) -> T::Keys, { @@ -738,13 +742,13 @@ impl Module { Some(new_keys) }); - let _ = >::translate::, _>( - |k| { - k.map(|k| k.into_iter() + let _ = >::translate::, _>(|k| { + k.map(|k| { + k.into_iter() .map(|(val, old_keys)| (val.clone(), upgrade(val, old_keys))) - .collect::>()) - } - ); + .collect::>() + }) + }); } /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. @@ -771,7 +775,10 @@ impl Module { /// /// This does not ensure that the reference counter in system is incremented appropriately, it /// must be done by the caller or the keys will be leaked in storage. - fn inner_set_keys(who: &T::ValidatorId, keys: T::Keys) -> Result, DispatchError> { + fn inner_set_keys( + who: &T::ValidatorId, + keys: T::Keys, + ) -> Result, DispatchError> { let old_keys = Self::load_keys(who); for id in T::Keys::key_ids() { @@ -789,7 +796,7 @@ impl Module { if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { if key == old { - continue; + continue } Self::clear_key_owner(*id, old); @@ -864,7 +871,8 @@ impl> FindAuthor for FindAccountFromAuthorIndex { fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator + where + I: 'a + IntoIterator, { let i = Inner::find_author(digests)?; diff --git a/substrate/frame/session/src/mock.rs b/substrate/frame/session/src/mock.rs index 1462b2326777eece0d8d4d6fb0c71ddea90cdb4e..7007286de6415baa48e29c6873d2f4c6f20cb78b 100644 --- a/substrate/frame/session/src/mock.rs +++ b/substrate/frame/session/src/mock.rs @@ -18,18 +18,19 @@ //! Mock helpers for Session. use super::*; -use std::cell::RefCell; +use crate as pallet_session; +#[cfg(feature = "historical")] +use crate::historical as pallet_session_historical; use frame_support::{parameter_types, BasicExternalities}; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ - Perbill, impl_opaque_keys, - traits::{BlakeTwo256, IdentityLookup, ConvertInto}, + impl_opaque_keys, testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, }; use sp_staking::SessionIndex; -use crate as pallet_session; -#[cfg(feature = "historical")] -use crate::historical as pallet_session_historical; +use std::cell::RefCell; impl_opaque_keys! 
{ pub struct MockSessionKeys { @@ -114,7 +115,12 @@ pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { fn should_end_session(now: u64) -> bool { let l = SESSION_LENGTH.with(|l| *l.borrow()); - now % l == 0 || FORCE_SESSION_END.with(|l| { let r = *l.borrow(); *l.borrow_mut() = false; r }) + now % l == 0 || + FORCE_SESSION_END.with(|l| { + let r = *l.borrow(); + *l.borrow_mut() = false; + r + }) } } @@ -128,11 +134,12 @@ impl SessionHandler for TestSessionHandler { _queued_validators: &[(u64, T)], ) { SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); - AUTHORITIES.with(|l| - *l.borrow_mut() = validators.iter() + AUTHORITIES.with(|l| { + *l.borrow_mut() = validators + .iter() .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) .collect() - ); + }); } fn on_disabled(_validator_index: usize) { DISABLED.with(|l| *l.borrow_mut() = true) @@ -167,9 +174,7 @@ impl SessionManager for TestSessionManager { impl crate::historical::SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} fn start_session(_: SessionIndex) {} - fn new_session(new_index: SessionIndex) - -> Option> - { + fn new_session(new_index: SessionIndex) -> Option> { >::new_session(new_index) .map(|vals| vals.into_iter().map(|val| (val, val)).collect()) } @@ -180,11 +185,11 @@ pub fn authorities() -> Vec { } pub fn force_new_session() { - FORCE_SESSION_END.with(|l| *l.borrow_mut() = true ) + FORCE_SESSION_END.with(|l| *l.borrow_mut() = true) } pub fn set_session_length(x: u64) { - SESSION_LENGTH.with(|l| *l.borrow_mut() = x ) + SESSION_LENGTH.with(|l| *l.borrow_mut() = x) } pub fn session_changed() -> bool { @@ -205,9 +210,8 @@ pub fn reset_before_session_end_called() { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let keys: Vec<_> = NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ); + let keys: Vec<_> = NEXT_VALIDATORS + .with(|l| l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect()); BasicExternalities::execute_with_storage(&mut t, || { for (ref k, ..) in &keys { frame_system::Pallet::::inc_providers(k); @@ -216,7 +220,9 @@ pub fn new_test_ext() -> sp_io::TestExternalities { // An additional identity that we use. 
frame_system::Pallet::::inc_providers(&69); }); - pallet_session::GenesisConfig:: { keys }.assimilate_storage(&mut t).unwrap(); + pallet_session::GenesisConfig:: { keys } + .assimilate_storage(&mut t) + .unwrap(); sp_io::TestExternalities::new(t) } diff --git a/substrate/frame/session/src/tests.rs b/substrate/frame/session/src/tests.rs index a551e1a4a261275426799bc41d4eff60462a507c..cb1a21bbd647a3883b04ddb246e11c2ffc14f313 100644 --- a/substrate/frame/session/src/tests.rs +++ b/substrate/frame/session/src/tests.rs @@ -18,17 +18,16 @@ // Tests for the Session Pallet use super::*; -use mock::Test; use codec::Decode; -use frame_support::{traits::OnInitialize, assert_ok, assert_noop}; -use sp_core::crypto::key_types::DUMMY; -use sp_runtime::testing::UintAuthorityId; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; use mock::{ - SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, - set_next_validators, set_session_length, session_changed, Origin, System, Session, - reset_before_session_end_called, before_session_end_called, new_test_ext, - PreUpgradeMockSessionKeys, + authorities, before_session_end_called, force_new_session, new_test_ext, + reset_before_session_end_called, session_changed, set_next_validators, set_session_length, + Origin, PreUpgradeMockSessionKeys, Session, System, Test, SESSION_CHANGED, + TEST_SESSION_CHANGED, }; +use sp_core::crypto::key_types::DUMMY; +use sp_runtime::testing::UintAuthorityId; fn initialize_block(block: u64) { SESSION_CHANGED.with(|l| *l.borrow_mut() = false); @@ -79,10 +78,10 @@ fn authorities_should_track_validators() { set_next_validators(vec![1, 2]); force_new_session(); initialize_block(1); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] + ); assert_eq!(Session::validators(), vec![1, 2, 3]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); assert!(before_session_end_called()); @@ -90,10 +89,10 @@ fn authorities_should_track_validators() { force_new_session(); initialize_block(2); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),] + ); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); @@ -103,22 +102,28 @@ fn authorities_should_track_validators() { assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); force_new_session(); initialize_block(3); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); assert_eq!(Session::validators(), vec![1, 2]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); assert!(before_session_end_called()); force_new_session(); initialize_block(4); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, 
UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); assert_eq!(Session::validators(), vec![1, 2, 4]); assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); }); @@ -288,10 +293,7 @@ fn periodic_session_works() { // 1/10 of progress. assert!(P::should_end_session(3u64)); assert_eq!(P::estimate_next_session_rotation(3u64).0.unwrap(), 3); - assert_eq!( - P::estimate_current_session_progress(3u64).0.unwrap(), - Permill::from_percent(10), - ); + assert_eq!(P::estimate_current_session_progress(3u64).0.unwrap(), Permill::from_percent(10),); for i in (1u64..10).map(|i| 3 + i) { assert!(!P::should_end_session(i)); @@ -314,30 +316,22 @@ fn periodic_session_works() { // the new session starts and we proceed in 1/10 increments. assert!(P::should_end_session(13u64)); assert_eq!(P::estimate_next_session_rotation(13u64).0.unwrap(), 23); - assert_eq!( - P::estimate_current_session_progress(13u64).0.unwrap(), - Permill::from_percent(10) - ); + assert_eq!(P::estimate_current_session_progress(13u64).0.unwrap(), Permill::from_percent(10)); assert!(!P::should_end_session(14u64)); assert_eq!(P::estimate_next_session_rotation(14u64).0.unwrap(), 23); - assert_eq!( - P::estimate_current_session_progress(14u64).0.unwrap(), - Permill::from_percent(20) - ); + assert_eq!(P::estimate_current_session_progress(14u64).0.unwrap(), Permill::from_percent(20)); } #[test] fn session_keys_generate_output_works_as_set_keys_input() { new_test_ext().execute_with(|| { let new_keys = mock::MockSessionKeys::generate(None); - assert_ok!( - Session::set_keys( - Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), - vec![], - ) - ); + assert_ok!(Session::set_keys( + Origin::signed(2), + ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), + vec![], + )); }); } @@ -368,26 +362,13 @@ fn upgrade_keys() { assert_eq!(mock::VALIDATORS.with(|l| l.borrow().clone()), vec![1, 2, 3]); new_test_ext().execute_with(|| { - let pre_one = PreUpgradeMockSessionKeys { - a: [1u8; 32], - b: [1u8; 64], - }; - - let pre_two = PreUpgradeMockSessionKeys { - a: [2u8; 32], - b: [2u8; 64], - }; - - let pre_three = PreUpgradeMockSessionKeys { - a: [3u8; 32], - b: [3u8; 64], - }; - - let val_keys = vec![ - (1u64, pre_one), - (2u64, pre_two), - (3u64, pre_three), - ]; + let pre_one = PreUpgradeMockSessionKeys { a: [1u8; 32], b: [1u8; 64] }; + + let pre_two = PreUpgradeMockSessionKeys { a: [2u8; 32], b: [2u8; 64] }; + + let pre_three = PreUpgradeMockSessionKeys { a: [3u8; 32], b: [3u8; 64] }; + + let val_keys = vec![(1u64, pre_one), (2u64, pre_two), (3u64, pre_three)]; // Set `QueuedKeys`. { @@ -422,9 +403,7 @@ fn upgrade_keys() { // Do the upgrade and check sanity. let mock_keys_for = |val| mock::MockSessionKeys { dummy: UintAuthorityId(val) }; - Session::upgrade_keys::( - |val, _old_keys| mock_keys_for(val), - ); + Session::upgrade_keys::(|val, _old_keys| mock_keys_for(val)); // Check key ownership. for (i, ref keys) in val_keys.iter() { @@ -438,11 +417,7 @@ fn upgrade_keys() { // Check queued keys. 
assert_eq!( Session::queued_keys(), - vec![ - (1, mock_keys_for(1)), - (2, mock_keys_for(2)), - (3, mock_keys_for(3)), - ], + vec![(1, mock_keys_for(1)), (2, mock_keys_for(2)), (3, mock_keys_for(3)),], ); for i in 1u64..4 { diff --git a/substrate/frame/session/src/weights.rs b/substrate/frame/session/src/weights.rs index ec911d8c01ccedbbc46f1717f927782e0bbbb891..ad722fdec1593a65e8f4143868cf5d035c30d419 100644 --- a/substrate/frame/session/src/weights.rs +++ b/substrate/frame/session/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/society/src/lib.rs b/substrate/frame/society/src/lib.rs index ff6cc0786dcb1539a8c5014929e178e09621cb89..ffe2759eb8f3983029567ab8269f32e061196751 100644 --- a/substrate/frame/society/src/lib.rs +++ b/substrate/frame/society/src/lib.rs @@ -251,25 +251,37 @@ mod mock; #[cfg(test)] mod tests; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{Percent, RuntimeDebug, +use codec::{Decode, Encode}; +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, + dispatch::DispatchResult, + ensure, traits::{ - StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, - TrailingZeroInput, CheckedSub - } + BalanceStatus, ChangeMembers, Currency, EnsureOrigin, ExistenceRequirement::AllowDeath, + Get, Imbalance, OnUnbalanced, Randomness, ReservableCurrency, + }, + weights::Weight, + PalletId, }; -use frame_support::{decl_error, decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult, PalletId}; -use frame_support::weights::Weight; -use frame_support::traits::{ - Currency, ReservableCurrency, Randomness, Get, ChangeMembers, BalanceStatus, - ExistenceRequirement::AllowDeath, EnsureOrigin, OnUnbalanced, Imbalance +use frame_system::{self as system, ensure_root, ensure_signed}; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; +use sp_runtime::{ + traits::{ + AccountIdConversion, CheckedSub, Hash, IntegerSquareRoot, Saturating, StaticLookup, + TrailingZeroInput, Zero, + }, + Percent, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_std::prelude::*; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// The module's configuration trait. pub trait Config: system::Config { @@ -370,7 +382,7 @@ pub enum VouchingStatus { pub type StrikeCount = u32; /// A bid for entry into society. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug,)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Bid { /// The bidder/candidate trying to enter society who: AccountId, @@ -1187,7 +1199,6 @@ fn pick_item<'a, R: RngCore, T>(rng: &mut R, items: &'a [T]) -> Option<&'a T> { /// Pick a new PRN, in the range [0, `max`] (inclusive). 
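Aside on the `pick_usize` helper that follows: selection is a plain `next_u32() % (max + 1)` draw from a ChaCha RNG that the pallet seeds from on-chain randomness. A standalone sketch of the same draw over a slice (uses the `rand_chacha` API this file already imports; `demo_pick` is a hypothetical name and the fixed seed is a placeholder for `T::Randomness`):

use rand_chacha::{
	rand_core::{RngCore, SeedableRng},
	ChaChaRng,
};

fn demo_pick<'a, R: RngCore, T>(rng: &mut R, items: &'a [T]) -> Option<&'a T> {
	if items.is_empty() {
		return None
	}
	// Same modulo draw as pick_item/pick_usize; uniform up to a small modulo bias.
	Some(&items[(rng.next_u32() % items.len() as u32) as usize])
}

fn demo() {
	let mut rng = ChaChaRng::from_seed([0u8; 32]); // placeholder seed
	assert!(demo_pick(&mut rng, &[10u64, 20, 30]).is_some());
}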
fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { - (rng.next_u32() % (max as u32 + 1)) as usize } @@ -1198,7 +1209,7 @@ impl, I: Instance> Module { mut bids: Vec>>, who: &T::AccountId, value: BalanceOf, - bid_kind: BidKind> + bid_kind: BidKind>, ) { const MAX_BID_COUNT: usize = 1000; @@ -1206,7 +1217,8 @@ impl, I: Instance> Module { // Insert new elements after the existing ones. This ensures new bids // with the same bid value are further down the list than existing ones. Ok(pos) => { - let different_bid = bids.iter() + let different_bid = bids + .iter() // Easily extract the index we are on .enumerate() // Skip ahead to the suggested position @@ -1218,25 +1230,13 @@ impl, I: Instance> Module { // If the element is not at the end of the list, insert the new element // in the spot. if let Some((p, _)) = different_bid { - bids.insert(p, Bid { - value, - who: who.clone(), - kind: bid_kind, - }); + bids.insert(p, Bid { value, who: who.clone(), kind: bid_kind }); // If the element is at the end of the list, push the element on the end. } else { - bids.push(Bid { - value, - who: who.clone(), - kind: bid_kind, - }); + bids.push(Bid { value, who: who.clone(), kind: bid_kind }); } }, - Err(pos) => bids.insert(pos, Bid { - value, - who: who.clone(), - kind: bid_kind, - }), + Err(pos) => bids.insert(pos, Bid { value, who: who.clone(), kind: bid_kind }), } // Keep it reasonably small. if bids.len() > MAX_BID_COUNT { @@ -1245,10 +1245,10 @@ impl, I: Instance> Module { BidKind::Deposit(deposit) => { let err_amount = T::Currency::unreserve(&popped, deposit); debug_assert!(err_amount.is_zero()); - } + }, BidKind::Vouch(voucher, _) => { >::remove(&voucher); - } + }, } Self::deposit_event(RawEvent::AutoUnbid(popped)); } @@ -1263,7 +1263,10 @@ impl, I: Instance> Module { } /// Check a user is a candidate. - fn is_candidate(candidates: &Vec>>, who: &T::AccountId) -> bool { + fn is_candidate( + candidates: &Vec>>, + who: &T::AccountId, + ) -> bool { // Looking up a candidate is the same as looking up a bid Self::is_bid(candidates, who) } @@ -1307,7 +1310,7 @@ impl, I: Instance> Module { T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); >::put(members); Ok(()) - } + }, } } @@ -1333,73 +1336,87 @@ impl, I: Instance> Module { // critical issues or side-effects. This is auto-correcting as members fall out of society. members.reserve(candidates.len()); - let maturity = >::block_number() - + Self::lock_duration(members.len() as u32); + let maturity = + >::block_number() + Self::lock_duration(members.len() as u32); let mut rewardees = Vec::new(); let mut total_approvals = 0; let mut total_slash = >::zero(); let mut total_payouts = >::zero(); - let accepted = candidates.into_iter().filter_map(|Bid {value, who: candidate, kind }| { - let mut approval_count = 0; - - // Creates a vector of (vote, member) for the given candidate - // and tallies total number of approve votes for that candidate. - let votes = members.iter() - .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) - .inspect(|&(v, _)| if v == Vote::Approve { approval_count += 1 }) - .collect::>(); - - // Select one of the votes at random. - // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. 
- let is_accepted = pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); - - let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; - - let bad_vote = |m: &T::AccountId| { - // Voter voted wrong way (or was just a lazy skeptic) then reduce their payout - // and increase their strikes. after MaxStrikes then they go into suspension. - let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); - - let strikes = >::mutate(m, |s| { - *s += 1; - *s - }); - if strikes >= T::MaxStrikes::get() { - Self::suspend_member(m); - } - amount - }; - - // Collect the voters who had a matching vote. - rewardees.extend(votes.into_iter() - .filter_map(|(v, m)| - if v == matching_vote { Some(m) } else { - total_slash += bad_vote(m); - None + let accepted = candidates + .into_iter() + .filter_map(|Bid { value, who: candidate, kind }| { + let mut approval_count = 0; + + // Creates a vector of (vote, member) for the given candidate + // and tallies total number of approve votes for that candidate. + let votes = members + .iter() + .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) + .inspect(|&(v, _)| { + if v == Vote::Approve { + approval_count += 1 + } + }) + .collect::>(); + + // Select one of the votes at random. + // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. + let is_accepted = + pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); + + let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; + + let bad_vote = |m: &T::AccountId| { + // Voter voted wrong way (or was just a lazy skeptic) then reduce their payout + // and increase their strikes. after MaxStrikes then they go into suspension. + let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); + + let strikes = >::mutate(m, |s| { + *s += 1; + *s + }); + if strikes >= T::MaxStrikes::get() { + Self::suspend_member(m); } - ).cloned() - ); + amount + }; + + // Collect the voters who had a matching vote. + rewardees.extend( + votes + .into_iter() + .filter_map(|(v, m)| { + if v == matching_vote { + Some(m) + } else { + total_slash += bad_vote(m); + None + } + }) + .cloned(), + ); - if is_accepted { - total_approvals += approval_count; - total_payouts += value; - members.push(candidate.clone()); + if is_accepted { + total_approvals += approval_count; + total_payouts += value; + members.push(candidate.clone()); - Self::pay_accepted_candidate(&candidate, value, kind, maturity); + Self::pay_accepted_candidate(&candidate, value, kind, maturity); - // We track here the total_approvals so that every candidate has a unique range - // of numbers from 0 to `total_approvals` with length `approval_count` so each - // candidate is proportionally represented when selecting a "primary" below. - Some((candidate, total_approvals, value)) - } else { - // Suspend Candidate - >::insert(&candidate, (value, kind)); - Self::deposit_event(RawEvent::CandidateSuspended(candidate)); - None - } - }).collect::>(); + // We track here the total_approvals so that every candidate has a unique range + // of numbers from 0 to `total_approvals` with length `approval_count` so each + // candidate is proportionally represented when selecting a "primary" below. + Some((candidate, total_approvals, value)) + } else { + // Suspend Candidate + >::insert(&candidate, (value, kind)); + Self::deposit_event(RawEvent::CandidateSuspended(candidate)); + None + } + }) + .collect::>(); // Clean up all votes. 
>::remove_all(None); @@ -1411,7 +1428,12 @@ impl, I: Instance> Module { Self::bump_payout(winner, maturity, total_slash); } else { // Move the slashed amount back from payouts account to local treasury. - let res = T::Currency::transfer(&Self::payouts(), &Self::account_id(), total_slash, AllowDeath); + let res = T::Currency::transfer( + &Self::payouts(), + &Self::account_id(), + total_slash, + AllowDeath, + ); debug_assert!(res.is_ok()); } } @@ -1423,7 +1445,12 @@ impl, I: Instance> Module { // this should never fail since we ensure we can afford the payouts in a previous // block, but there's not much we can do to recover if it fails anyway. - let res = T::Currency::transfer(&Self::account_id(), &Self::payouts(), total_payouts, AllowDeath); + let res = T::Currency::transfer( + &Self::account_id(), + &Self::payouts(), + total_payouts, + AllowDeath, + ); debug_assert!(res.is_ok()); } @@ -1433,10 +1460,15 @@ impl, I: Instance> Module { // Choose a random number between 0 and `total_approvals` let primary_point = pick_usize(&mut rng, total_approvals - 1); // Find the zero bid or the user who falls on that point - let primary = accepted.iter().find(|e| e.2.is_zero() || e.1 > primary_point) - .expect("e.1 of final item == total_approvals; \ - worst case find will always return that item; qed") - .0.clone(); + let primary = accepted + .iter() + .find(|e| e.2.is_zero() || e.1 > primary_point) + .expect( + "e.1 of final item == total_approvals; \ + worst case find will always return that item; qed", + ) + .0 + .clone(); let accounts = accepted.into_iter().map(|x| x.0).collect::>(); @@ -1464,9 +1496,10 @@ impl, I: Instance> Module { >::put(&candidates); // Select sqrt(n) random members from the society and make them skeptics. - let pick_member = |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); + let pick_member = + |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); for skeptic in (0..members.len().integer_sqrt()).map(pick_member) { - for Bid{ who: c, .. } in candidates.iter() { + for Bid { who: c, .. } in candidates.iter() { >::insert(c, skeptic, Vote::Skeptic); } } @@ -1487,7 +1520,7 @@ impl, I: Instance> Module { // whole slash is accounted for. *amount -= rest; rest = Zero::zero(); - break; + break } } >::insert(who, &payouts[dropped..]); @@ -1497,10 +1530,12 @@ impl, I: Instance> Module { /// Bump the payout amount of `who`, to be unlocked at the given block number. fn bump_payout(who: &T::AccountId, when: T::BlockNumber, value: BalanceOf) { - if !value.is_zero(){ - >::mutate(who, |payouts| match payouts.binary_search_by_key(&when, |x| x.0) { - Ok(index) => payouts[index].1 += value, - Err(index) => payouts.insert(index, (when, value)), + if !value.is_zero() { + >::mutate(who, |payouts| { + match payouts.binary_search_by_key(&when, |x| x.0) { + Ok(index) => payouts[index].1 += value, + Err(index) => payouts.insert(index, (when, value)), + } }); } } @@ -1528,7 +1563,7 @@ impl, I: Instance> Module { let err_amount = T::Currency::unreserve(candidate, deposit); debug_assert!(err_amount.is_zero()); value - } + }, BidKind::Vouch(voucher, tip) => { // Check that the voucher is still vouching, else some other logic may have removed their status. 
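Aside on the `bump_payout` hunk above: pending payouts stay sorted by unlock block, so an update is a single `binary_search_by_key` that either accumulates in place or inserts at the sorted position. A standalone sketch of that merge (plain integer types; `demo_bump_payout` is a hypothetical name):

fn demo_bump_payout(payouts: &mut Vec<(u64, u64)>, when: u64, value: u64) {
	if value == 0 {
		return
	}
	match payouts.binary_search_by_key(&when, |x| x.0) {
		// Existing unlock block: accumulate the value.
		Ok(index) => payouts[index].1 += value,
		// New unlock block: insert where the list stays sorted.
		Err(index) => payouts.insert(index, (when, value)),
	}
}

// demo_bump_payout(&mut v, 9, 2) turns [(5, 100)] into [(5, 100), (9, 2)],
// the shape asserted by `bad_vote_slash_works` in the tests below.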
if >::take(&voucher) == Some(VouchingStatus::Vouching) { @@ -1539,7 +1574,7 @@ impl, I: Instance> Module { } else { value } - } + }, }; Self::bump_payout(candidate, maturity, value); @@ -1554,14 +1589,12 @@ impl, I: Instance> Module { let mut approval_count = 0; let mut rejection_count = 0; // Tallies total number of approve and reject votes for the defender. - members.iter() - .filter_map(|m| >::take(m)) - .for_each(|v| { - match v { - Vote::Approve => approval_count += 1, - _ => rejection_count += 1, - } - }); + members.iter().filter_map(|m| >::take(m)).for_each( + |v| match v { + Vote::Approve => approval_count += 1, + _ => rejection_count += 1, + }, + ); if approval_count <= rejection_count { // User has failed the challenge diff --git a/substrate/frame/society/src/mock.rs b/substrate/frame/society/src/mock.rs index 18cdda678da6f1d3016e3c92411f6b46e7917c72..2ae9f7b44ba73b3c4990592fcf5c12e13a52bdd9 100644 --- a/substrate/frame/society/src/mock.rs +++ b/substrate/frame/society/src/mock.rs @@ -21,16 +21,16 @@ use super::*; use crate as pallet_society; use frame_support::{ - parameter_types, ord_parameter_types, - traits::{OnInitialize, OnFinalize}, + ord_parameter_types, parameter_types, + traits::{OnFinalize, OnInitialize}, }; use frame_support_test::TestRandomness; +use frame_system::EnsureSignedBy; use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; -use frame_system::EnsureSignedBy; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -156,14 +156,16 @@ impl EnvBuilder { pub fn execute R>(mut self, f: F) -> R { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); self.balances.push((Society::account_id(), self.balance.max(self.pot))); - pallet_balances::GenesisConfig:: { - balances: self.balances, - }.assimilate_storage(&mut t).unwrap(); - pallet_society::GenesisConfig::{ + pallet_balances::GenesisConfig:: { balances: self.balances } + .assimilate_storage(&mut t) + .unwrap(); + pallet_society::GenesisConfig:: { members: self.members, pot: self.pot, max_members: self.max_members, - }.assimilate_storage(&mut t).unwrap(); + } + .assimilate_storage(&mut t) + .unwrap(); let mut ext: sp_io::TestExternalities = t.into(); ext.execute_with(f) } @@ -210,12 +212,7 @@ pub fn run_to_block(n: u64) { pub fn create_bid( value: Balance, who: AccountId, - kind: BidKind -) -> Bid -{ - Bid { - who, - kind, - value - } + kind: BidKind, +) -> Bid { + Bid { who, kind, value } } diff --git a/substrate/frame/society/src/tests.rs b/substrate/frame/society/src/tests.rs index 7c834483957707439657f65e0d7a79a307e229a0..9f8e32dea5088e7400995fe8a23063e4a536d038 100644 --- a/substrate/frame/society/src/tests.rs +++ b/substrate/frame/society/src/tests.rs @@ -20,9 +20,9 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop}; -use sp_runtime::traits::BadOrigin; +use frame_support::{assert_noop, assert_ok}; use sp_core::blake2_256; +use sp_runtime::traits::BadOrigin; #[test] fn founding_works() { @@ -118,10 +118,13 @@ fn bidding_works() { assert_eq!(Society::pot(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot - assert_eq!(Society::candidates(), vec![ - create_bid(300, 30, BidKind::Deposit(25)), - create_bid(400, 40, BidKind::Deposit(25)), - ]); + assert_eq!( + Society::candidates(), + vec![ + create_bid(300, 30, BidKind::Deposit(25)), + create_bid(400, 40, 
BidKind::Deposit(25)), + ] + ); // A member votes for these candidates to join the society assert_ok!(Society::vote(Origin::signed(10), 30, true)); assert_ok!(Society::vote(Origin::signed(10), 40, true)); @@ -132,7 +135,7 @@ fn bidding_works() { assert_eq!(Balances::free_balance(Society::account_id()), 9_300); assert_eq!(Society::pot(), 1_300); // Left over from the original bids is 50 who satisfies the condition of bid less than pot. - assert_eq!(Society::candidates(), vec![ create_bid(500, 50, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(500, 50, BidKind::Deposit(25))]); // 40, now a member, can vote for 50 assert_ok!(Society::vote(Origin::signed(40), 50, true)); run_to_block(12); @@ -144,7 +147,7 @@ fn bidding_works() { // No more candidates satisfy the requirements assert_eq!(Society::candidates(), vec![]); assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period + // Next period run_to_block(16); // Same members assert_eq!(Society::members(), vec![10, 30, 40, 50]); @@ -153,7 +156,7 @@ fn bidding_works() { // No payouts assert_eq!(Balances::free_balance(Society::account_id()), 8_800); // Candidate 60 now qualifies based on the increased pot size. - assert_eq!(Society::candidates(), vec![ create_bid(1900, 60, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(1900, 60, BidKind::Deposit(25))]); // Candidate 60 is voted in. assert_ok!(Society::vote(Origin::signed(50), 60, true)); run_to_block(20); @@ -183,7 +186,7 @@ fn unbidding_works() { assert_eq!(Balances::reserved_balance(30), 0); // 20 wins candidacy run_to_block(4); - assert_eq!(Society::candidates(), vec![ create_bid(1000, 20, BidKind::Deposit(25)) ]); + assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Deposit(25))]); }); } @@ -350,7 +353,10 @@ fn suspended_candidate_rejected_works() { assert_eq!(Society::suspended_candidate(20).is_some(), true); // Normal user cannot make judgement on suspended candidate - assert_noop!(Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), BadOrigin); + assert_noop!( + Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), + BadOrigin + ); // Suspension judgement origin makes no direct judgement assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Rebid)); @@ -391,7 +397,10 @@ fn vouch_works() { assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); // A member cannot vouch twice at the same time - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); // Vouching creates the right kind of bid assert_eq!(>::get(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); // Vouched user can become candidate @@ -475,7 +484,10 @@ fn unvouch_works() { assert_eq!(Society::members(), vec![10]); // 10 cannot vouch again - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); // 10 cannot unvouch either, so they are banned forever. 
assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::NotVouching); }); @@ -654,7 +666,7 @@ fn bad_vote_slash_works() { assert_eq!(>::get(30), 0); assert_eq!(>::get(40), 0); // Their payout is slashed, a random person is rewarded - assert_eq!(>::get(10), vec![(5, 100), (9,2)]); + assert_eq!(>::get(10), vec![(5, 100), (9, 2)]); assert_eq!(>::get(20), vec![(5, 98)]); assert_eq!(>::get(30), vec![(5, 100)]); assert_eq!(>::get(40), vec![(5, 100)]); @@ -672,7 +684,10 @@ fn user_cannot_bid_twice() { assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); // Cannot vouch when already bid assert_ok!(Society::add_member(&50)); - assert_noop!(Society::vouch(Origin::signed(50), 20, 100, 100), Error::::AlreadyBid); + assert_noop!( + Society::vouch(Origin::signed(50), 20, 100, 100), + Error::::AlreadyBid + ); }); } @@ -794,7 +809,11 @@ fn max_limits_work() { assert_eq!(Society::candidates().len(), 4); // Fill up members with suspended candidates from the first rotation for i in 100..104 { - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), i, Judgement::Approve)); + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + i, + Judgement::Approve + )); } assert_eq!(Society::members().len(), 100); // Can't add any more members @@ -840,15 +859,18 @@ fn zero_bid_works() { assert_eq!(Society::pot(), 1000); assert_eq!(Balances::free_balance(Society::account_id()), 10_000); // Choose smallest bidding users whose total is less than pot, with only one zero bid. - assert_eq!(Society::candidates(), vec![ - create_bid(0, 30, BidKind::Deposit(25)), - create_bid(300, 50, BidKind::Deposit(25)), - create_bid(400, 60, BidKind::Deposit(25)), - ]); - assert_eq!(>::get(), vec![ - create_bid(0, 20, BidKind::Deposit(25)), - create_bid(0, 40, BidKind::Deposit(25)), - ]); + assert_eq!( + Society::candidates(), + vec![ + create_bid(0, 30, BidKind::Deposit(25)), + create_bid(300, 50, BidKind::Deposit(25)), + create_bid(400, 60, BidKind::Deposit(25)), + ] + ); + assert_eq!( + >::get(), + vec![create_bid(0, 20, BidKind::Deposit(25)), create_bid(0, 40, BidKind::Deposit(25)),] + ); // A member votes for these candidates to join the society assert_ok!(Society::vote(Origin::signed(10), 30, true)); assert_ok!(Society::vote(Origin::signed(10), 50, true)); @@ -878,7 +900,7 @@ fn bids_ordered_correctly() { for j in 0..5 { for i in 0..5 { - final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); + final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); } } diff --git a/substrate/frame/staking/reward-curve/src/lib.rs b/substrate/frame/staking/reward-curve/src/lib.rs index de912eee99ce2f4addb011f03aac16836b52eee5..c225c9045783528f66e57f0e303fb5c0cb6fccd0 100644 --- a/substrate/frame/staking/reward-curve/src/lib.rs +++ b/substrate/frame/staking/reward-curve/src/lib.rs @@ -21,7 +21,7 @@ mod log; use log::log2; use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span}; +use proc_macro2::{Span, TokenStream as TokenStream2}; use proc_macro_crate::{crate_name, FoundCrate}; use quote::{quote, ToTokens}; use std::convert::TryInto; @@ -82,7 +82,9 @@ pub fn build(input: TokenStream) -> TokenStream { let test_module = generate_test_module(&input); let imports = match crate_name("sp-runtime") { - Ok(FoundCrate::Itself) => quote!( extern crate sp_runtime as _sp_runtime; ), + Ok(FoundCrate::Itself) => quote!( + extern crate sp_runtime as _sp_runtime; + ), Ok(FoundCrate::Name(sp_runtime)) => { let ident = 
syn::Ident::new(&sp_runtime, Span::call_site()); quote!( extern crate #ident as _sp_runtime; ) @@ -99,7 +101,8 @@ pub fn build(input: TokenStream) -> TokenStream { #declaration }; #test_module - ).into() + ) + .into() } const MILLION: u32 = 1_000_000; @@ -134,10 +137,10 @@ struct Bounds { impl Bounds { fn check(&self, value: u32) -> bool { - let wrong = (self.min_strict && value <= self.min) - || (!self.min_strict && value < self.min) - || (self.max_strict && value >= self.max) - || (!self.max_strict && value > self.max); + let wrong = (self.min_strict && value <= self.min) || + (!self.min_strict && value < self.min) || + (self.max_strict && value >= self.max) || + (!self.max_strict && value > self.max); !wrong } @@ -156,17 +159,24 @@ impl core::fmt::Display for Bounds { } } -fn parse_field(input: ParseStream, bounds: Bounds) - -> syn::Result -{ +fn parse_field( + input: ParseStream, + bounds: Bounds, +) -> syn::Result { ::parse(&input)?; ::parse(&input)?; let value_lit = syn::LitInt::parse(&input)?; let value: u32 = value_lit.base10_parse()?; if !bounds.check(value) { - return Err(syn::Error::new(value_lit.span(), format!( - "Invalid {}: {}, must be in {}", Token::default().to_token_stream(), value, bounds, - ))); + return Err(syn::Error::new( + value_lit.span(), + format!( + "Invalid {}: {}, must be in {}", + Token::default().to_token_stream(), + value, + bounds, + ), + )) } Ok(value) @@ -187,54 +197,42 @@ impl Parse for INposInput { ::parse(&input)?; if !input.is_empty() { - return Err(input.error("expected end of input stream, no token expected")); + return Err(input.error("expected end of input stream, no token expected")) } - let min_inflation = parse_field::(&args_input, Bounds { - min: 0, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; + let min_inflation = parse_field::( + &args_input, + Bounds { min: 0, min_strict: true, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let max_inflation = parse_field::(&args_input, Bounds { - min: min_inflation, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; + let max_inflation = parse_field::( + &args_input, + Bounds { min: min_inflation, min_strict: true, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let ideal_stake = parse_field::(&args_input, Bounds { - min: 0_100_000, - min_strict: false, - max: 0_900_000, - max_strict: false, - })?; + let ideal_stake = parse_field::( + &args_input, + Bounds { min: 0_100_000, min_strict: false, max: 0_900_000, max_strict: false }, + )?; ::parse(&args_input)?; - let falloff = parse_field::(&args_input, Bounds { - min: 0_010_000, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; + let falloff = parse_field::( + &args_input, + Bounds { min: 0_010_000, min_strict: false, max: 1_000_000, max_strict: false }, + )?; ::parse(&args_input)?; - let max_piece_count = parse_field::(&args_input, Bounds { - min: 2, - min_strict: false, - max: 1_000, - max_strict: false, - })?; + let max_piece_count = parse_field::( + &args_input, + Bounds { min: 2, min_strict: false, max: 1_000, max_strict: false }, + )?; ::parse(&args_input)?; - let test_precision = parse_field::(&args_input, Bounds { - min: 0, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; + let test_precision = parse_field::( + &args_input, + Bounds { min: 0, min_strict: false, max: 1_000_000, max_strict: false }, + )?; >::parse(&args_input)?; if !args_input.is_empty() { - return Err(args_input.error("expected end of input stream, no token expected")); + 
return Err(args_input.error("expected end of input stream, no token expected")) } Ok(Self { @@ -263,7 +261,8 @@ impl INPoS { INPoS { i_0: input.min_inflation, i_ideal: (input.max_inflation as u64 * MILLION as u64 / input.ideal_stake as u64) - .try_into().unwrap(), + .try_into() + .unwrap(), i_ideal_times_x_ideal: input.max_inflation, x_ideal: input.ideal_stake, d: input.falloff, @@ -275,7 +274,7 @@ impl INPoS { // See web3 docs for the details fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { if y == self.i_0 { - return u32::MAX; + return u32::MAX } // Note: the log term calculated here represents a per_million value let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); @@ -295,8 +294,8 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. // This ensures that the total number of segments doesn't overflow max_piece_count. - let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) - / (input.max_piece_count - 1); + let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) / + (input.max_piece_count - 1); let mut delta_y = max_length; let mut y = input.max_inflation; @@ -322,16 +321,15 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { let prev = points.last().unwrap(); // Compute the y corresponding to x=1_000_000 using this point and the previous one. - let delta_y: u32 = ( - (next_x - 1_000_000) as u64 - * (prev.1 - next_y) as u64 - / (next_x - prev.0) as u64 - ).try_into().unwrap(); + let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 / + (next_x - prev.0) as u64) + .try_into() + .unwrap(); let y = next_y + delta_y; points.push((1_000_000, y)); - return points; + return points } points.push((next_x, next_y)); y = next_y; @@ -345,7 +343,8 @@ fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { let mut points_tokens = quote!(); - let max = points.iter() + let max = points + .iter() .map(|&(_, x)| x) .max() .unwrap_or(0) @@ -354,13 +353,15 @@ fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { .unwrap_or(1_000_000_000); for (x, y) in points { - let error = || panic!( - "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ + let error = || { + panic!( + "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ because of point: x = {:07} per million y = {:07} per million", - x, y - ); + x, y + ) + }; let x_perbill = x.checked_mul(1_000).unwrap_or_else(error); let y_perbill = y.checked_mul(1_000).unwrap_or_else(error); @@ -386,7 +387,7 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { let ident = &input.ident; let precision = input.test_precision; - let i_0 = inpos.i_0 as f64/ MILLION as f64; + let i_0 = inpos.i_0 as f64 / MILLION as f64; let i_ideal_times_x_ideal = inpos.i_ideal_times_x_ideal as f64 / MILLION as f64; let i_ideal = inpos.i_ideal as f64 / MILLION as f64; let x_ideal = inpos.x_ideal as f64 / MILLION as f64; @@ -443,5 +444,6 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { ); } } - ).into() + ) + .into() } diff --git a/substrate/frame/staking/reward-curve/src/log.rs b/substrate/frame/staking/reward-curve/src/log.rs index 747011a73e1db848fe972ecebd84df3dfa0ea744..06d2000619b5c82802458fa91b5d99e903259f15 100644 --- a/substrate/frame/staking/reward-curve/src/log.rs +++ 
b/substrate/frame/staking/reward-curve/src/log.rs @@ -4,7 +4,7 @@ use std::convert::TryInto; macro_rules! pow2 { ($n:expr) => { 1_u32 << $n - } + }; } /// Returns the k_th per_million taylor term for a log2 function @@ -33,7 +33,7 @@ fn taylor_term(k: u32, y_num: u128, y_den: u128) -> u32 { /// * result represents a per-million output of log2 pub fn log2(p: u32, q: u32) -> u32 { assert!(p >= q); // keep p/q bound to [1, inf) - assert!(p <= u32::MAX/2); + assert!(p <= u32::MAX / 2); // This restriction should not be mandatory. But function is only tested and used for this. assert!(p <= 1_000_000); @@ -79,7 +79,7 @@ fn test_log() { let p: u32 = (1_000_000 as u64 * p as u64 / div as u64).try_into().unwrap(); let q: u32 = (1_000_000 as u64 * q as u64 / div as u64).try_into().unwrap(); - let res = - (log2(p, q) as i64); + let res = -(log2(p, q) as i64); let expected = ((q as f64 / p as f64).log(2.0) * 1_000_000 as f64).round() as i64; assert!((res - expected).abs() <= 6); } @@ -124,4 +124,4 @@ fn test_log_of_largest_input() { let expected = 19_931_568; let tolerance = 100; assert!((log2(p, q) as i32 - expected as i32).abs() < tolerance); -} \ No newline at end of file +} diff --git a/substrate/frame/staking/reward-fn/src/lib.rs b/substrate/frame/staking/reward-fn/src/lib.rs index 205f0207673a3e5dddb590a7e23f46d3e257259b..3f91c39b4055056c467752fa324fdb84c68fb91c 100644 --- a/substrate/frame/staking/reward-fn/src/lib.rs +++ b/substrate/frame/staking/reward-fn/src/lib.rs @@ -19,8 +19,12 @@ //! Useful function for inflation for nominated proof of stake. -use sp_arithmetic::{Perquintill, PerThing, biguint::BigUint, traits::{Zero, SaturatedConversion}}; use core::convert::TryFrom; +use sp_arithmetic::{ + biguint::BigUint, + traits::{SaturatedConversion, Zero}, + PerThing, Perquintill, +}; /// Compute yearly inflation using function /// @@ -54,11 +58,7 @@ use core::convert::TryFrom; /// the global incentivization to get the `ideal_stake`. A higher number results in less typical /// inflation at the cost of greater volatility for validators. /// Must be more than 0.01. -pub fn compute_inflation( - stake: P, - ideal_stake: P, - falloff: P, -) -> P { +pub fn compute_inflation(stake: P, ideal_stake: P, falloff: P) -> P { if stake < ideal_stake { // ideal_stake is more than 0 because it is strictly more than stake return stake / ideal_stake @@ -98,9 +98,7 @@ pub fn compute_inflation( let res = compute_taylor_serie_part(&inpos_param); match u128::try_from(res.clone()) { - Ok(res) if res <= Into::::into(P::ACCURACY) => { - P::from_parts(res.saturated_into()) - }, + Ok(res) if res <= Into::::into(P::ACCURACY) => P::from_parts(res.saturated_into()), // If result is beyond bounds there is nothing we can do _ => { log::error!("Invalid inflation computation: unexpected result {:?}", res); @@ -109,7 +107,6 @@ pub fn compute_inflation( } } - /// Internal struct holding parameter info alongside other cached value. 
/// /// All expressed in part from `accuracy` @@ -149,12 +146,15 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { taylor_sum = taylor_sum.add(&last_taylor_term); } else { if taylor_sum >= last_taylor_term { - taylor_sum = taylor_sum.sub(&last_taylor_term) + taylor_sum = taylor_sum + .sub(&last_taylor_term) // NOTE: Should never happen as checked above .unwrap_or_else(|e| e); } else { taylor_sum_positive = !taylor_sum_positive; - taylor_sum = last_taylor_term.clone().sub(&taylor_sum) + taylor_sum = last_taylor_term + .clone() + .sub(&taylor_sum) // NOTE: Should never happen as checked above .unwrap_or_else(|e| e); } @@ -180,14 +180,13 @@ fn compute_taylor_serie_part(p: &INPoSParam) -> BigUint { /// /// `previous_taylor_term` and result are expressed with accuracy `INPoSParam.accuracy` fn compute_taylor_term(k: u32, previous_taylor_term: &BigUint, p: &INPoSParam) -> BigUint { - let x_minus_x_ideal = p.x.clone().sub(&p.x_ideal) - // NOTE: Should never happen, as x must be more than x_ideal - .unwrap_or_else(|_| BigUint::zero()); + let x_minus_x_ideal = + p.x.clone() + .sub(&p.x_ideal) + // NOTE: Should never happen, as x must be more than x_ideal + .unwrap_or_else(|_| BigUint::zero()); - let res = previous_taylor_term.clone() - .mul(&x_minus_x_ideal) - .mul(&p.ln2_div_d) - .div_unit(k); + let res = previous_taylor_term.clone().mul(&x_minus_x_ideal).mul(&p.ln2_div_d).div_unit(k); // p.accuracy is stripped by definition. let res = div_by_stripped(res, &p.accuracy); @@ -230,7 +229,5 @@ fn div_by_stripped(mut a: BigUint, b: &BigUint) -> BigUint { .div_unit(100_000) } - a.div(b, false) - .map(|res| res.0) - .unwrap_or_else(|| BigUint::zero()) + a.div(b, false).map(|res| res.0).unwrap_or_else(|| BigUint::zero()) } diff --git a/substrate/frame/staking/reward-fn/tests/test.rs b/substrate/frame/staking/reward-fn/tests/test.rs index 32daf9d09a76d4158ecb1538294c9631fe29ac95..dc5b661c4098dfb13419cd65b94b6a0f406b9a69 100644 --- a/substrate/frame/staking/reward-fn/tests/test.rs +++ b/substrate/frame/staking/reward-fn/tests/test.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use sp_arithmetic::{PerThing, Perbill, PerU16, Percent, Perquintill}; +use sp_arithmetic::{PerThing, PerU16, Perbill, Percent, Perquintill}; /// This tests the precision and panics if the error is too big.
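Aside on `compute_inflation` above: below `ideal_stake` the curve is the straight line `stake / ideal_stake`, and only past the ideal point does the exponential falloff (approximated by the Taylor series just shown) apply. A hypothetical spot check of the linear region, assuming `compute_inflation` is in scope (e.g. via this crate's public API):

use sp_arithmetic::Perquintill;

fn demo_linear_region() {
	let stake = Perquintill::from_percent(25);
	let ideal_stake = Perquintill::from_percent(50);
	let falloff = Perquintill::from_percent(5); // irrelevant below the ideal point
	// Half of the ideal staking rate maps to half of the maximal multiplier.
	assert_eq!(compute_inflation(stake, ideal_stake, falloff), Perquintill::from_percent(50));
}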
/// @@ -32,7 +32,7 @@ fn test_precision(stake: P, ideal_stake: P, falloff: P) { if error > 8f64 / accuracy_f64 && error > 8.0 * f64::EPSILON { panic!( "stake: {:?}, ideal_stake: {:?}, falloff: {:?}, res: {}, expect: {}", - stake, ideal_stake, falloff, res , expect + stake, ideal_stake, falloff, res, expect ); } } diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index ff7be272eec8107b079cd317cffdf952ded314b1..15a20dfb937c713834aacae2e85eca004470f860 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -21,11 +21,11 @@ use super::*; use crate::Pallet as Staking; use testing_utils::*; -use sp_runtime::traits::One; -use frame_system::RawOrigin; pub use frame_benchmarking::{ - benchmarks, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite, + account, benchmarks, impl_benchmark_test_suite, whitelist_account, whitelisted_caller, }; +use frame_system::RawOrigin; +use sp_runtime::traits::One; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; @@ -36,13 +36,15 @@ const MAX_SLASHES: u32 = 1000; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. fn add_slashing_spans(who: &T::AccountId, spans: u32) { - if spans == 0 { return } + if spans == 0 { + return + } // For the first slashing span, we initialize let mut slashing_spans = crate::slashing::SlashingSpans::new(0); SpanSlash::::insert((who, 0), crate::slashing::SpanRecord::default()); - for i in 1 .. spans { + for i in 1..spans { assert!(slashing_spans.end_span(i)); SpanSlash::::insert((who, i), crate::slashing::SpanRecord::default()); } @@ -56,7 +58,7 @@ pub fn create_validator_with_nominators( n: u32, upper_bound: u32, dead: bool, - destination: RewardDestination + destination: RewardDestination, ) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>), &'static str> { // Clean up any existing state. clear_validators_and_nominators::(); @@ -64,10 +66,8 @@ pub fn create_validator_with_nominators( let mut points_individual = Vec::new(); let (v_stash, v_controller) = create_stash_controller::(0, 100, destination.clone())?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - .. Default::default() - }; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); @@ -77,14 +77,17 @@ pub fn create_validator_with_nominators( let mut nominators = Vec::new(); // Give the validator n nominators, but keep total users in the system the same. - for i in 0 .. upper_bound { + for i in 0..upper_bound { let (n_stash, n_controller) = if !dead { create_stash_controller::(u32::MAX - i, 100, destination.clone())? } else { create_stash_and_dead_controller::(u32::MAX - i, 100, destination.clone())? }; if i < n { - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + vec![stash_lookup.clone()], + )?; nominators.push((n_stash, n_controller)); } } @@ -639,7 +642,7 @@ benchmarks! 
{ #[cfg(test)] mod tests { use super::*; - use crate::mock::{ExtBuilder, Test, Balances, Staking, Origin}; + use crate::mock::{Balances, ExtBuilder, Origin, Staking, Test}; use frame_support::assert_ok; #[test] @@ -654,7 +657,8 @@ mod tests { ::MAX_NOMINATIONS as usize, false, None, - ).unwrap(); + ) + .unwrap(); let count_validators = Validators::::iter().count(); let count_nominators = Nominators::::iter().count(); @@ -674,7 +678,8 @@ mod tests { ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, - ).unwrap(); + ) + .unwrap(); assert_eq!(nominators.len() as u32, n); @@ -698,7 +703,8 @@ mod tests { ::MaxNominatorRewardedPerValidator::get() as u32, false, RewardDestination::Staked, - ).unwrap(); + ) + .unwrap(); // Add 20 slashing spans let num_of_slashing_spans = 20; @@ -706,14 +712,14 @@ mod tests { let slashing_spans = SlashingSpans::::get(&validator_stash).unwrap(); assert_eq!(slashing_spans.iter().count(), num_of_slashing_spans as usize); - for i in 0 .. num_of_slashing_spans { + for i in 0..num_of_slashing_spans { assert!(SpanSlash::::contains_key((&validator_stash, i))); } // Test everything is cleaned up assert_ok!(Staking::kill_stash(&validator_stash, num_of_slashing_spans)); assert!(SlashingSpans::::get(&validator_stash).is_none()); - for i in 0 .. num_of_slashing_spans { + for i in 0..num_of_slashing_spans { assert!(!SpanSlash::::contains_key((&validator_stash, i))); } }); @@ -726,13 +732,17 @@ mod tests { let n = 100; let selected_benchmark = SelectedBenchmark::payout_all; - let c = vec![(frame_benchmarking::BenchmarkParameter::v, v), (frame_benchmarking::BenchmarkParameter::n, n)]; + let c = vec![ + (frame_benchmarking::BenchmarkParameter::v, v), + (frame_benchmarking::BenchmarkParameter::n, n), + ]; let closure_to_benchmark = >::instance( &selected_benchmark, &c, - true - ).unwrap(); + true, + ) + .unwrap(); assert_ok!(closure_to_benchmark()); }); diff --git a/substrate/frame/staking/src/inflation.rs b/substrate/frame/staking/src/inflation.rs index e5259543fd4ba9d3ed10ed8dfc4ef78d235cb558..6f2bfe06ac24789bc3c055d9fcc6b19ea127be6b 100644 --- a/substrate/frame/staking/src/inflation.rs +++ b/substrate/frame/staking/src/inflation.rs @@ -20,7 +20,7 @@ //! The staking rate in NPoS is the total amount of tokens staked by nominators and validators, //! divided by the total token supply. -use sp_runtime::{Perbill, traits::AtLeast32BitUnsigned, curve::PiecewiseLinear}; +use sp_runtime::{curve::PiecewiseLinear, traits::AtLeast32BitUnsigned, Perbill}; /// The total payout to all validators (and their nominators) per era and maximum payout. /// @@ -33,16 +33,18 @@ pub fn compute_total_payout( yearly_inflation: &PiecewiseLinear<'static>, npos_token_staked: N, total_tokens: N, - era_duration: u64 -) -> (N, N) where N: AtLeast32BitUnsigned + Clone { + era_duration: u64, +) -> (N, N) +where + N: AtLeast32BitUnsigned + Clone, +{ // Milliseconds per year for the Julian year (365.25 days). const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; let portion = Perbill::from_rational(era_duration as u64, MILLISECONDS_PER_YEAR); - let payout = portion * yearly_inflation.calculate_for_fraction_times_denominator( - npos_token_staked, - total_tokens.clone(), - ); + let payout = portion * + yearly_inflation + .calculate_for_fraction_times_denominator(npos_token_staked, total_tokens.clone()); let maximum = portion * (yearly_inflation.maximum * total_tokens); (payout, maximum) } @@ -70,7 +72,7 @@ mod test { // not 10_000 due to rounding error. 
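Aside on `compute_total_payout` above: an era's share of the yearly inflation is just `era_duration / MILLISECONDS_PER_YEAR`, with the year taken as a Julian year of 365.25 days. A plain-integer sketch of that portion (`era_portion_parts_per_billion` is a hypothetical name):

// Julian year in milliseconds, as defined in the hunk above.
const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100;

fn era_portion_parts_per_billion(era_duration_ms: u64) -> u64 {
	// Widening to u128 mirrors the overflow-free rational behind Perbill::from_rational.
	(era_duration_ms as u128 * 1_000_000_000 / MILLISECONDS_PER_YEAR as u128) as u64
}

// A one-hour era covers roughly 114_077 parts per billion of the year, which is
// the factor scaling the HOUR-based payout asserted in the test below.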
assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).1, 9_993); - //super::I_NPOS.calculate_for_fraction_times_denominator(25, 100) + // super::I_NPOS.calculate_for_fraction_times_denominator(25, 100) assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).0, 2_498); assert_eq!(super::compute_total_payout(&I_NPOS, 5_000, 100_000u64, YEAR).0, 3_248); assert_eq!(super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, YEAR).0, 6_246); @@ -98,7 +100,8 @@ mod test { 2_500_000_000_000_000_000_000_000_000u128, 5_000_000_000_000_000_000_000_000_000u128, HOUR - ).0, + ) + .0, 57_038_500_000_000_000_000_000 ); } diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 340e1a2a3f0712fc3f26d5d5fb9ce55bb9b88300..594773f658ec190e23b7beb0b15e180fd05b968c 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -267,57 +267,49 @@ #![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(any(feature = "runtime-benchmarks", test))] +pub mod benchmarking; #[cfg(test)] mod mock; -#[cfg(test)] -mod tests; #[cfg(any(feature = "runtime-benchmarks", test))] pub mod testing_utils; -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod benchmarking; +#[cfg(test)] +mod tests; -pub mod slashing; pub mod inflation; +pub mod slashing; pub mod weights; -use sp_std::{ - result, - prelude::*, - collections::btree_map::BTreeMap, - convert::From, -}; -use codec::{HasCompact, Encode, Decode}; +use codec::{Decode, Encode, HasCompact}; +use frame_election_provider_support::{data_provider, ElectionProvider, Supports, VoteWeight}; use frame_support::{ pallet_prelude::*, + traits::{ + Currency, CurrencyToVote, EnsureOrigin, EstimateNextNewSession, Get, Imbalance, + LockIdentifier, LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, + }, weights::{ - Weight, WithPostDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, - }, - traits::{ - Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, - UnixTime, EstimateNextNewSession, EnsureOrigin, CurrencyToVote, + Weight, WithPostDispatchInfo, }, }; +use frame_system::{ensure_root, ensure_signed, offchain::SendTransactionTypes, pallet_prelude::*}; +pub use pallet::*; use pallet_session::historical; use sp_runtime::{ - Percent, Perbill, RuntimeDebug, DispatchError, curve::PiecewiseLinear, traits::{ - Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, - AtLeast32BitUnsigned, Bounded, + AtLeast32BitUnsigned, Bounded, CheckedSub, Convert, SaturatedConversion, Saturating, + StaticLookup, Zero, }, + DispatchError, Perbill, Percent, RuntimeDebug, }; use sp_staking::{ + offence::{Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, SessionIndex, - offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, }; -use frame_system::{ - ensure_signed, ensure_root, pallet_prelude::*, - offchain::SendTransactionTypes, -}; -use frame_election_provider_support::{ElectionProvider, VoteWeight, Supports, data_provider}; +use sp_std::{collections::btree_map::BTreeMap, convert::From, prelude::*, result}; pub use weights::WeightInfo; -pub use pallet::*; const STAKING_ID: LockIdentifier = *b"staking "; pub(crate) const LOG_TARGET: &'static str = "runtime::staking"; @@ -423,10 +415,7 @@ pub struct ValidatorPrefs { impl Default for ValidatorPrefs { fn default() -> Self { - ValidatorPrefs { - commission: Default::default(), - blocked: false, - } + ValidatorPrefs { 
commission: Default::default(), blocked: false } } } @@ -462,20 +451,23 @@ pub struct StakingLedger { pub claimed_rewards: Vec, } -impl< - AccountId, - Balance: HasCompact + Copy + Saturating + AtLeast32BitUnsigned, -> StakingLedger { +impl + StakingLedger +{ /// Remove entries from `unlocking` that are sufficiently old and reduce the /// total by the sum of their balances. fn consolidate_unlocked(self, current_era: EraIndex) -> Self { let mut total = self.total; - let unlocking = self.unlocking.into_iter() - .filter(|chunk| if chunk.era > current_era { - true - } else { - total = total.saturating_sub(chunk.value); - false + let unlocking = self + .unlocking + .into_iter() + .filter(|chunk| { + if chunk.era > current_era { + true + } else { + total = total.saturating_sub(chunk.value); + false + } }) .collect(); @@ -484,7 +476,7 @@ impl< total, active: self.active, unlocking, - claimed_rewards: self.claimed_rewards + claimed_rewards: self.claimed_rewards, } } @@ -514,7 +506,8 @@ impl< } } -impl StakingLedger where +impl StakingLedger +where Balance: AtLeast32BitUnsigned + Saturating + Copy, { /// Slash the validator for a given amount of balance. This can grow the value @@ -523,39 +516,34 @@ impl StakingLedger where /// /// Slashes from `active` funds first, and then `unlocking`, starting with the /// chunks that are closest to unlocking. - fn slash( - &mut self, - mut value: Balance, - minimum_balance: Balance, - ) -> Balance { + fn slash(&mut self, mut value: Balance, minimum_balance: Balance) -> Balance { let pre_total = self.total; let total = &mut self.total; let active = &mut self.active; - let slash_out_of = | - total_remaining: &mut Balance, - target: &mut Balance, - value: &mut Balance, - | { - let mut slash_from_target = (*value).min(*target); + let slash_out_of = + |total_remaining: &mut Balance, target: &mut Balance, value: &mut Balance| { + let mut slash_from_target = (*value).min(*target); - if !slash_from_target.is_zero() { - *target -= slash_from_target; + if !slash_from_target.is_zero() { + *target -= slash_from_target; - // Don't leave a dust balance in the staking system. - if *target <= minimum_balance { - slash_from_target += *target; - *value += sp_std::mem::replace(target, Zero::zero()); - } + // Don't leave a dust balance in the staking system. 
+ if *target <= minimum_balance { + slash_from_target += *target; + *value += sp_std::mem::replace(target, Zero::zero()); + } - *total_remaining = total_remaining.saturating_sub(slash_from_target); - *value -= slash_from_target; - } - }; + *total_remaining = total_remaining.saturating_sub(slash_from_target); + *value -= slash_from_target; + } + }; slash_out_of(total, active, &mut value); - let i = self.unlocking.iter_mut() + let i = self + .unlocking + .iter_mut() .map(|chunk| { slash_out_of(total, &mut chunk.value, &mut value); chunk.value @@ -641,7 +629,8 @@ pub trait SessionInterface: frame_system::Config { fn prune_historical_up_to(up_to: SessionIndex); } -impl SessionInterface<::AccountId> for T where +impl SessionInterface<::AccountId> for T +where T: pallet_session::Config::AccountId>, T: pallet_session::historical::Config< FullIdentification = Exposure<::AccountId, BalanceOf>, @@ -649,8 +638,10 @@ impl SessionInterface<::AccountId> for T w >, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: - Convert<::AccountId, Option<::AccountId>>, + T::ValidatorIdOf: Convert< + ::AccountId, + Option<::AccountId>, + >, { fn disable_validator(validator: &::AccountId) -> Result { >::disable(validator) @@ -691,10 +682,9 @@ impl EraPayout for () { /// Adaptor to turn a `PiecewiseLinear` curve definition into an `EraPayout` impl, used for /// backwards compatibility. pub struct ConvertCurve(sp_std::marker::PhantomData); -impl< - Balance: AtLeast32BitUnsigned + Clone, - T: Get<&'static PiecewiseLinear<'static>>, -> EraPayout for ConvertCurve { +impl>> + EraPayout for ConvertCurve +{ fn era_payout( total_staked: Balance, total_issuance: Balance, @@ -761,8 +751,14 @@ pub mod migrations { use super::*; pub fn pre_migrate() -> Result<(), &'static str> { - assert!(CounterForValidators::::get().is_zero(), "CounterForValidators already set."); - assert!(CounterForNominators::::get().is_zero(), "CounterForNominators already set."); + assert!( + CounterForValidators::::get().is_zero(), + "CounterForValidators already set." + ); + assert!( + CounterForNominators::::get().is_zero(), + "CounterForNominators already set." + ); assert!(StorageVersion::::get() == Releases::V6_0_0); Ok(()) } @@ -778,16 +774,14 @@ pub mod migrations { StorageVersion::::put(Releases::V7_0_0); log!(info, "Completed staking migration to Releases::V7_0_0"); - T::DbWeight::get().reads_writes( - validator_count.saturating_add(nominator_count).into(), - 2, - ) + T::DbWeight::get() + .reads_writes(validator_count.saturating_add(nominator_count).into(), 2) } } pub mod v6 { use super::*; - use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; + use frame_support::{generate_storage_alias, traits::Get, weights::Weight}; // NOTE: value type doesn't matter, we just set it to () here. generate_storage_alias!(Staking, SnapshotValidators => Value<()>); @@ -805,7 +799,10 @@ pub mod migrations { log!(info, "QueuedElected.exits()? {:?}", QueuedElected::exists()); log!(info, "QueuedScore.exits()? {:?}", QueuedScore::exists()); // these must exist. - assert!(IsCurrentSessionFinal::exists(), "IsCurrentSessionFinal storage item not found!"); + assert!( + IsCurrentSessionFinal::exists(), + "IsCurrentSessionFinal storage item not found!" 
+ ); assert!(EraElectionStatus::exists(), "EraElectionStatus storage item not found!"); Ok(()) } @@ -926,7 +923,7 @@ pub mod pallet { #[pallet::extra_constants] impl<T: Config> Pallet<T> { - //TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. + // TODO: rename to snake case after https://github.com/paritytech/substrate/issues/8826 fixed. #[allow(non_snake_case)] fn MaxNominations() -> u32 { T::MAX_NOMINATIONS } } #[pallet::type_value] - pub(crate) fn HistoryDepthOnEmpty() -> u32 { 84u32 } + pub(crate) fn HistoryDepthOnEmpty() -> u32 { + 84u32 + } /// Number of eras to keep in history. /// @@ -980,28 +979,22 @@ pub mod pallet { /// Map from all (unlocked) "controller" accounts to the info regarding the staking. #[pallet::storage] #[pallet::getter(fn ledger)] - pub type Ledger<T: Config> = StorageMap< - _, - Blake2_128Concat, T::AccountId, - StakingLedger<T::AccountId, BalanceOf<T>>, - >; + pub type Ledger<T: Config> = + StorageMap<_, Blake2_128Concat, T::AccountId, StakingLedger<T::AccountId, BalanceOf<T>>>; /// Where the reward payment should be made. Keyed by stash. #[pallet::storage] #[pallet::getter(fn payee)] - pub type Payee<T: Config> = StorageMap< - _, - Twox64Concat, T::AccountId, - RewardDestination<T::AccountId>, - ValueQuery, - >; + pub type Payee<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, RewardDestination<T::AccountId>, ValueQuery>; /// The map from (wannabe) validator stash key to the preferences of that validator. /// /// When updating this storage item, you must also update the `CounterForValidators`. #[pallet::storage] #[pallet::getter(fn validators)] - pub type Validators<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; + pub type Validators<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, ValidatorPrefs, ValueQuery>; /// A tracker to keep count of the number of items in the `Validators` map. #[pallet::storage] @@ -1018,7 +1011,8 @@ pub mod pallet { /// When updating this storage item, you must also update the `CounterForNominators`. #[pallet::storage] #[pallet::getter(fn nominators)] - pub type Nominators<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, Nominations<T::AccountId>>; + pub type Nominators<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, Nominations<T::AccountId>>; /// A tracker to keep count of the number of items in the `Nominators` map. #[pallet::storage] @@ -1064,8 +1058,10 @@ pub mod pallet { #[pallet::getter(fn eras_stakers)] pub type ErasStakers<T: Config> = StorageDoubleMap< _, - Twox64Concat, EraIndex, - Twox64Concat, T::AccountId, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, Exposure<T::AccountId, BalanceOf<T>>, ValueQuery, >; @@ -1085,8 +1081,10 @@ pub mod pallet { #[pallet::getter(fn eras_stakers_clipped)] pub type ErasStakersClipped<T: Config> = StorageDoubleMap< _, - Twox64Concat, EraIndex, - Twox64Concat, T::AccountId, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, Exposure<T::AccountId, BalanceOf<T>>, ValueQuery, >; @@ -1101,8 +1099,10 @@ pub mod pallet { #[pallet::getter(fn eras_validator_prefs)] pub type ErasValidatorPrefs<T: Config> = StorageDoubleMap< _, - Twox64Concat, EraIndex, - Twox64Concat, T::AccountId, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, ValidatorPrefs, ValueQuery, >; @@ -1118,18 +1118,15 @@ pub mod pallet { /// If reward hasn't been set or has been removed then 0 reward is returned. #[pallet::storage] #[pallet::getter(fn eras_reward_points)] - pub type ErasRewardPoints<T: Config> = StorageMap< - _, - Twox64Concat, EraIndex, - EraRewardPoints<T::AccountId>, - ValueQuery, - >; + pub type ErasRewardPoints<T: Config> = + StorageMap<_, Twox64Concat, EraIndex, EraRewardPoints<T::AccountId>, ValueQuery>; /// The total amount staked for the last `HISTORY_DEPTH` eras.
/// If total hasn't been set or has been removed then 0 stake is returned. #[pallet::storage] #[pallet::getter(fn eras_total_stake)] - pub type ErasTotalStake<T: Config> = StorageMap<_, Twox64Concat, EraIndex, BalanceOf<T>, ValueQuery>; + pub type ErasTotalStake<T: Config> = + StorageMap<_, Twox64Concat, EraIndex, BalanceOf<T>, ValueQuery>; /// Mode of era forcing. #[pallet::storage] @@ -1153,7 +1150,8 @@ pub mod pallet { #[pallet::storage] pub type UnappliedSlashes<T: Config> = StorageMap< _, - Twox64Concat, EraIndex, + Twox64Concat, + EraIndex, Vec<UnappliedSlash<T::AccountId, BalanceOf<T>>>, ValueQuery, >; @@ -1163,37 +1161,38 @@ pub mod pallet { /// Must contains information for eras for the range: /// `[active_era - bounding_duration; active_era]` #[pallet::storage] - pub(crate) type BondedEras<T: Config> = StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; + pub(crate) type BondedEras<T: Config> = + StorageValue<_, Vec<(EraIndex, SessionIndex)>, ValueQuery>; /// All slashing events on validators, mapped by era to the highest slash proportion /// and slash value of the era. #[pallet::storage] pub(crate) type ValidatorSlashInEra<T: Config> = StorageDoubleMap< _, - Twox64Concat, EraIndex, - Twox64Concat, T::AccountId, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, (Perbill, BalanceOf<T>), >; /// All slashing events on nominators, mapped by era to the highest slash value of the era. #[pallet::storage] - pub(crate) type NominatorSlashInEra<T: Config> = StorageDoubleMap< - _, - Twox64Concat, EraIndex, - Twox64Concat, T::AccountId, - BalanceOf<T>, - >; + pub(crate) type NominatorSlashInEra<T: Config> = + StorageDoubleMap<_, Twox64Concat, EraIndex, Twox64Concat, T::AccountId, BalanceOf<T>>; /// Slashing spans for stash accounts. #[pallet::storage] - pub(crate) type SlashingSpans<T: Config> = StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; + pub(crate) type SlashingSpans<T: Config> = + StorageMap<_, Twox64Concat, T::AccountId, slashing::SlashingSpans>; /// Records information about the maximum slash of a stash within a slashing span, /// as well as how much reward has been paid out. #[pallet::storage] pub(crate) type SpanSlash<T: Config> = StorageMap< _, - Twox64Concat, (T::AccountId, slashing::SpanIndex), + Twox64Concat, + (T::AccountId, slashing::SpanIndex), slashing::SpanRecord<BalanceOf<T>>, ValueQuery, >; @@ -1280,18 +1279,15 @@ pub mod pallet { RewardDestination::Staked, ); let _ = match status { - StakerStatus::Validator => { - <Pallet<T>>::validate( - T::Origin::from(Some(controller.clone()).into()), - Default::default(), - ) - }, - StakerStatus::Nominator(votes) => { - <Pallet<T>>::nominate( - T::Origin::from(Some(controller.clone()).into()), - votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), - ) - }, _ => Ok(()) + StakerStatus::Validator => <Pallet<T>>::validate( + T::Origin::from(Some(controller.clone()).into()), + Default::default(), + ), + StakerStatus::Nominator(votes) => <Pallet<T>>::nominate( + T::Origin::from(Some(controller.clone()).into()), + votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), + ), + _ => Ok(()), }; } } @@ -1536,7 +1532,10 @@ pub mod pallet { ledger.total += extra; ledger.active += extra; // Last check: the new active amount of ledger must be more than ED. - ensure!(ledger.active >= T::Currency::minimum_balance(), Error::<T>::InsufficientBond); + ensure!( + ledger.active >= T::Currency::minimum_balance(), + Error::<T>::InsufficientBond + ); Self::deposit_event(Event::<T>::Bonded(stash, extra)); Self::update_ledger(&controller, &ledger); @@ -1564,13 +1563,13 @@ pub mod pallet { /// /// See also [`Call::withdraw_unbonded`].
#[pallet::weight(T::WeightInfo::unbond())] - pub fn unbond(origin: OriginFor, #[pallet::compact] value: BalanceOf) -> DispatchResult { + pub fn unbond( + origin: OriginFor, + #[pallet::compact] value: BalanceOf, + ) -> DispatchResult { let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!( - ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, - Error::::NoMoreChunks, - ); + ensure!(ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, Error::::NoMoreChunks,); let mut value = value.min(ledger.active); @@ -1631,22 +1630,23 @@ pub mod pallet { ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { - // This account must have called `unbond()` with some value that caused the active - // portion to fall below existential deposit + will have no more unlocking chunks - // left. We can now safely remove all staking-related information. - Self::kill_stash(&stash, num_slashing_spans)?; - // Remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - // This is worst case scenario, so we use the full weight and return None - None - } else { - // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); + let post_info_weight = + if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&stash, num_slashing_spans)?; + // Remove the lock. + T::Currency::remove_lock(STAKING_ID, &stash); + // This is worst case scenario, so we use the full weight and return None + None + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + Self::update_ledger(&controller, &ledger); - // This is only an update, so we use less overall weight. - Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) - }; + // This is only an update, so we use less overall weight. + Some(T::WeightInfo::withdraw_unbonded_update(num_slashing_spans)) + }; // `old_total` should never be less than the new total because // `consolidate_unlocked` strictly subtracts balance. @@ -1677,7 +1677,10 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinValidatorBond` and start calling `chill_other`. // Until then, we explicitly block new validators to protect the runtime. if let Some(max_validators) = MaxValidatorsCount::::get() { - ensure!(CounterForValidators::::get() < max_validators, Error::::TooManyValidators); + ensure!( + CounterForValidators::::get() < max_validators, + Error::::TooManyValidators + ); } } @@ -1713,7 +1716,10 @@ pub mod pallet { // If this error is reached, we need to adjust the `MinNominatorBond` and start calling `chill_other`. // Until then, we explicitly block new nominators to protect the runtime. 
if let Some(max_nominators) = MaxNominatorsCount::::get() { - ensure!(CounterForNominators::::get() < max_nominators, Error::::TooManyNominators); + ensure!( + CounterForNominators::::get() < max_nominators, + Error::::TooManyNominators + ); } } @@ -1722,13 +1728,18 @@ pub mod pallet { let old = Nominators::::get(stash).map_or_else(Vec::new, |x| x.targets); - let targets = targets.into_iter() + let targets = targets + .into_iter() .map(|t| T::Lookup::lookup(t).map_err(DispatchError::from)) - .map(|n| n.and_then(|n| if old.contains(&n) || !Validators::::get(&n).blocked { - Ok(n) - } else { - Err(Error::::BadTarget.into()) - })) + .map(|n| { + n.and_then(|n| { + if old.contains(&n) || !Validators::::get(&n).blocked { + Ok(n) + } else { + Err(Error::::BadTarget.into()) + } + }) + }) .collect::, _>>()?; let nominations = Nominations { @@ -2043,7 +2054,9 @@ pub mod pallet { /// NOTE: weights are assuming that payouts are made to alive stash account (Staked). /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. /// # - #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get()))] + #[pallet::weight(T::WeightInfo::payout_stakers_alive_staked( + T::MaxNominatorRewardedPerValidator::get() + ))] pub fn payout_stakers( origin: OriginFor, validator_stash: T::AccountId, @@ -2078,10 +2091,11 @@ pub mod pallet { Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); Self::update_ledger(&controller, &ledger); Ok(Some( - 35 * WEIGHT_PER_MICROS - + 50 * WEIGHT_PER_NANOS * (ledger.unlocking.len() as Weight) - + T::DbWeight::get().reads_writes(3, 2) - ).into()) + 35 * WEIGHT_PER_MICROS + + 50 * WEIGHT_PER_NANOS * (ledger.unlocking.len() as Weight) + + T::DbWeight::get().reads_writes(3, 2), + ) + .into()) } /// Set `HistoryDepth` value. This function will delete any history information @@ -2106,7 +2120,8 @@ pub mod pallet { /// - Writes Each: ErasValidatorReward, ErasRewardPoints, ErasTotalStake, ErasStartSessionIndex /// # #[pallet::weight(T::WeightInfo::set_history_depth(*_era_items_deleted))] - pub fn set_history_depth(origin: OriginFor, + pub fn set_history_depth( + origin: OriginFor, #[pallet::compact] new_history_depth: EraIndex, #[pallet::compact] _era_items_deleted: u32, ) -> DispatchResult { @@ -2164,20 +2179,29 @@ pub mod pallet { /// Note: Making this call only makes sense if you first set the validator preferences to /// block any further nominations. #[pallet::weight(T::WeightInfo::kick(who.len() as u32))] - pub fn kick(origin: OriginFor, who: Vec<::Source>) -> DispatchResult { + pub fn kick( + origin: OriginFor, + who: Vec<::Source>, + ) -> DispatchResult { let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; - for nom_stash in who.into_iter() + for nom_stash in who + .into_iter() .map(T::Lookup::lookup) .collect::, _>>()? 
.into_iter() { - Nominators::::mutate(&nom_stash, |maybe_nom| if let Some(ref mut nom) = maybe_nom { - if let Some(pos) = nom.targets.iter().position(|v| v == stash) { - nom.targets.swap_remove(pos); - Self::deposit_event(Event::::Kicked(nom_stash.clone(), stash.clone())); + Nominators::::mutate(&nom_stash, |maybe_nom| { + if let Some(ref mut nom) = maybe_nom { + if let Some(pos) = nom.targets.iter().position(|v| v == stash) { + nom.targets.swap_remove(pos); + Self::deposit_event(Event::::Kicked( + nom_stash.clone(), + stash.clone(), + )); + } } }); } @@ -2237,14 +2261,10 @@ pub mod pallet { /// /// This can be helpful if bond requirements are updated, and we need to remove old users /// who do not satisfy these requirements. - /// // TODO: Maybe we can deprecate `chill` in the future. // https://github.com/paritytech/substrate/issues/9111 #[pallet::weight(T::WeightInfo::chill_other())] - pub fn chill_other( - origin: OriginFor, - controller: T::AccountId, - ) -> DispatchResult { + pub fn chill_other(origin: OriginFor, controller: T::AccountId) -> DispatchResult { // Anyone can call this function. let caller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; @@ -2263,14 +2283,22 @@ pub mod pallet { if caller != controller { let threshold = ChillThreshold::::get().ok_or(Error::::CannotChillOther)?; let min_active_bond = if Nominators::::contains_key(&stash) { - let max_nominator_count = MaxNominatorsCount::::get().ok_or(Error::::CannotChillOther)?; + let max_nominator_count = + MaxNominatorsCount::::get().ok_or(Error::::CannotChillOther)?; let current_nominator_count = CounterForNominators::::get(); - ensure!(threshold * max_nominator_count < current_nominator_count, Error::::CannotChillOther); + ensure!( + threshold * max_nominator_count < current_nominator_count, + Error::::CannotChillOther + ); MinNominatorBond::::get() } else if Validators::::contains_key(&stash) { - let max_validator_count = MaxValidatorsCount::::get().ok_or(Error::::CannotChillOther)?; + let max_validator_count = + MaxValidatorsCount::::get().ok_or(Error::::CannotChillOther)?; let current_validator_count = CounterForValidators::::get(); - ensure!(threshold * max_validator_count < current_validator_count, Error::::CannotChillOther); + ensure!( + threshold * max_validator_count < current_validator_count, + Error::::CannotChillOther + ); MinValidatorBond::::get() } else { Zero::zero() @@ -2313,41 +2341,46 @@ impl Pallet { }) } - fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { + fn do_payout_stakers( + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResultWithPostInfo { // Validate input data let current_era = CurrentEra::::get().ok_or( - Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)), )?; let history_depth = Self::history_depth(); ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), - Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) ); // Note: if era has no reward to be claimed, era may be future. better not to update // `ledger.claimed_rewards` in this case. 
- let era_payout = >::get(&era) - .ok_or_else(|| - Error::::InvalidEraToReward - .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) - )?; + let era_payout = >::get(&era).ok_or_else(|| { + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; let controller = Self::bonded(&validator_stash).ok_or( - Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)), )?; let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; - ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); + ledger + .claimed_rewards + .retain(|&x| x >= current_era.saturating_sub(history_depth)); match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err( - Error::::AlreadyClaimed.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) - )?, + Ok(_) => Err(Error::::AlreadyClaimed + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)))?, Err(pos) => ledger.claimed_rewards.insert(pos, era), } let exposure = >::get(&era, &ledger.stash); - /* Input data seems good, no errors allowed after this point */ + // Input data seems good, no errors allowed after this point >::insert(&controller, &ledger); @@ -2360,7 +2393,9 @@ impl Pallet { let era_reward_points = >::get(&era); let total_reward_points = era_reward_points.total; - let validator_reward_points = era_reward_points.individual.get(&ledger.stash) + let validator_reward_points = era_reward_points + .individual + .get(&ledger.stash) .map(|points| *points) .unwrap_or_else(|| Zero::zero()); @@ -2371,10 +2406,8 @@ impl Pallet { // This is the fraction of the total reward that the validator and the // nominators will get. - let validator_total_reward_part = Perbill::from_rational( - validator_reward_points, - total_reward_points, - ); + let validator_total_reward_part = + Perbill::from_rational(validator_reward_points, total_reward_points); // This is how much validator + nominators are entitled to. let validator_total_payout = validator_total_reward_part * era_payout; @@ -2386,17 +2419,13 @@ impl Pallet { let validator_leftover_payout = validator_total_payout - validator_commission_payout; // Now let's calculate how this is split to the validator. - let validator_exposure_part = Perbill::from_rational( - exposure.own, - exposure.total, - ); + let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; // We can now make total validator payout: - if let Some(imbalance) = Self::make_payout( - &ledger.stash, - validator_staking_payout + validator_commission_payout - ) { + if let Some(imbalance) = + Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) + { Self::deposit_event(Event::::Reward(ledger.stash, imbalance.peek())); } @@ -2407,12 +2436,10 @@ impl Pallet { // Lets now calculate how this is split to the nominators. // Reward only the clipped exposures. Note this is not necessarily sorted. 
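// Worked example with made-up numbers (a sketch, not values from the test suite): suppose
// `era_payout = 10_000`, the validator earned 20 of 100 total era points, commission is 25%,
// and its exposure is 1_000 own out of 1_250 total. Then:
//   validator_total_payout      = (20 / 100) * 10_000     = 2_000
//   validator_commission_payout = 25% * 2_000             = 500
//   validator_leftover_payout   = 2_000 - 500             = 1_500
//   validator_staking_payout    = (1_000 / 1_250) * 1_500 = 1_200 (paid on top of the 500)
// The loop below then gives each clipped nominator `(exposure / 1_250) * 1_500`, e.g. 300
// for an exposure of 250, so the parts sum back to the full 2_000.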
for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational( - nominator.value, - exposure.total, - ); + let nominator_exposure_part = Perbill::from_rational(nominator.value, exposure.total); - let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; + let nominator_reward: BalanceOf = + nominator_exposure_part * validator_leftover_payout; // We can now make nominator payout: if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. @@ -2430,14 +2457,9 @@ impl Pallet { /// This will also update the stash lock. fn update_ledger( controller: &T::AccountId, - ledger: &StakingLedger> + ledger: &StakingLedger>, ) { - T::Currency::set_lock( - STAKING_ID, - &ledger.stash, - ledger.total, - WithdrawReasons::all(), - ); + T::Currency::set_lock(STAKING_ID, &ledger.stash, ledger.total, WithdrawReasons::all()); >::insert(controller, ledger); } @@ -2456,11 +2478,8 @@ impl Pallet { let dest = Self::payee(stash); match dest { RewardDestination::Controller => Self::bonded(stash) - .and_then(|controller| - Some(T::Currency::deposit_creating(&controller, amount)) - ), - RewardDestination::Stash => - T::Currency::deposit_into_existing(stash, amount).ok(), + .and_then(|controller| Some(T::Currency::deposit_creating(&controller, amount))), + RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), RewardDestination::Staked => Self::bonded(stash) .and_then(|c| Self::ledger(&c).map(|l| (c, l))) .and_then(|(controller, mut l)| { @@ -2470,9 +2489,8 @@ impl Pallet { Self::update_ledger(&controller, &l); r }), - RewardDestination::Account(dest_account) => { - Some(T::Currency::deposit_creating(&dest_account, amount)) - }, + RewardDestination::Account(dest_account) => + Some(T::Currency::deposit_creating(&dest_account, amount)), RewardDestination::None => None, } } @@ -2487,8 +2505,8 @@ impl Pallet { 0 }); - let era_length = session_index.checked_sub(current_era_start_session_index) - .unwrap_or(0); // Must never happen. + let era_length = + session_index.checked_sub(current_era_start_session_index).unwrap_or(0); // Must never happen. match ForceEra::::get() { // Will be set to `NotForcing` again if a new era has been triggered. @@ -2506,8 +2524,8 @@ impl Pallet { // New era. let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); - if maybe_new_era_validators.is_some() - && matches!(ForceEra::::get(), Forcing::ForceNew) + if maybe_new_era_validators.is_some() && + matches!(ForceEra::::get(), Forcing::ForceNew) { ForceEra::::put(Forcing::NotForcing); } @@ -2576,9 +2594,8 @@ impl Pallet { let first_kept = active_era - bonding_duration; // Prune out everything that's from before the first-kept index. - let n_to_prune = bonded.iter() - .take_while(|&&(era_idx, _)| era_idx < first_kept) - .count(); + let n_to_prune = + bonded.iter().take_while(|&&(era_idx, _)| era_idx < first_kept).count(); // Kill slashing metadata. for (pruned_era, _) in bonded.drain(..n_to_prune) { @@ -2647,7 +2664,10 @@ impl Pallet { /// In case election result has more than [`MinimumValidatorCount`] validator trigger a new era. /// /// In case a new era is planned, the new validator set is returned. 
- fn try_trigger_new_era(start_session_index: SessionIndex, is_genesis: bool) -> Option> { + fn try_trigger_new_era( + start_session_index: SessionIndex, + is_genesis: bool, + ) -> Option> { let (election_result, weight) = if is_genesis { T::GenesisElectionProvider::elect().map_err(|e| { log!(warn, "genesis election provider failed due to {:?}", e); @@ -2687,7 +2707,7 @@ impl Pallet { CurrentEra::::put(0); ErasStartSessionIndex::::insert(&0, &start_session_index); }, - _ => () + _ => (), } Self::deposit_event(Event::StakingElectionFailed); @@ -2766,7 +2786,7 @@ impl Pallet { .map(|(nominator, weight)| (nominator, to_currency(weight))) .for_each(|(nominator, stake)| { if nominator == validator { - own = own.saturating_add(stake); + own = own.saturating_add(stake); } else { others.push(IndividualExposure { who: nominator, value: stake }); } @@ -2817,16 +2837,18 @@ impl Pallet { /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. fn apply_unapplied_slashes(active_era: EraIndex) { let slash_defer_duration = T::SlashDeferDuration::get(); - ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { - let keep_from = active_era.saturating_sub(slash_defer_duration); - for era in (*earliest)..keep_from { - let era_slashes = ::UnappliedSlashes::take(&era); - for slash in era_slashes { - slashing::apply_slash::(slash); + ::EarliestUnappliedSlash::mutate(|earliest| { + if let Some(ref mut earliest) = earliest { + let keep_from = active_era.saturating_sub(slash_defer_duration); + for era in (*earliest)..keep_from { + let era_slashes = ::UnappliedSlashes::take(&era); + for slash in era_slashes { + slashing::apply_slash::(slash); + } } - } - *earliest = (*earliest).max(keep_from) + *earliest = (*earliest).max(keep_from) + } }) } @@ -2841,9 +2863,7 @@ impl Pallet { /// relatively to their points. /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. - pub fn reward_by_ids( - validators_points: impl IntoIterator - ) { + pub fn reward_by_ids(validators_points: impl IntoIterator) { if let Some(active_era) = Self::active_era() { >::mutate(active_era.index, |era_rewards| { for (validator, points) in validators_points.into_iter() { @@ -2993,7 +3013,7 @@ impl frame_election_provider_support::ElectionDataProvider>::iter().count() as u32 == CounterForValidators::::get()); if maybe_max_len.map_or(false, |max_len| voter_count > max_len) { - return Err("Voter snapshot too big"); + return Err("Voter snapshot too big") } let slashing_span_count = >::iter().count(); @@ -3009,7 +3029,7 @@ impl frame_election_provider_support::ElectionDataProvider::get() as usize; if maybe_max_len.map_or(false, |max_len| target_count > max_len) { - return Err("Target snapshot too big"); + return Err("Target snapshot too big") } let weight = ::DbWeight::get().reads(target_count as u64); @@ -3066,10 +3086,7 @@ impl frame_election_provider_support::ElectionDataProvider historical::SessionManager Option>)>> { - >::new_session_genesis(new_index).map(|validators| { - let current_era = Self::current_era() - // Must be some as a new era has been created. - .unwrap_or(0); + >::new_session_genesis(new_index).map( + |validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. 
+ .unwrap_or(0); - validators.into_iter().map(|v| { - let exposure = Self::eras_stakers(current_era, &v); - (v, exposure) - }).collect() - }) + validators + .into_iter() + .map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }) + .collect() + }, + ) } fn start_session(start_index: SessionIndex) { >::start_session(start_index) @@ -3228,10 +3253,7 @@ where Self::reward_by_ids(vec![(author, 20)]) } fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { - Self::reward_by_ids(vec![ - (>::author(), 2), - (author, 1) - ]) + Self::reward_by_ids(vec![(>::author(), 2), (author, 1)]) } } @@ -3374,15 +3396,14 @@ where let reward_cost = (2, 2); add_db_reads_writes( (1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len, - (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len + (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len, ); } } else { // Defer to end of some `slash_defer_duration` from now. - ::UnappliedSlashes::mutate( - active_era, - move |for_later| for_later.push(unapplied), - ); + ::UnappliedSlashes::mutate(active_era, move |for_later| { + for_later.push(unapplied) + }); add_db_reads_writes(1, 1); } } else { @@ -3414,9 +3435,7 @@ where if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { R::report_offence(reporters, offence) } else { - >::deposit_event( - Event::::OldSlashingReportDiscarded(offence_session) - ); + >::deposit_event(Event::::OldSlashingReportDiscarded(offence_session)); Ok(()) } } diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 3242a40ccd45e1e573e557424a7adc0624f3fce8..d17076f4c36ff7f1b1f4b1f52bb288ac5e348fba 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -17,8 +17,9 @@ //! 
Test utilities -use crate::*; use crate as staking; +use crate::*; +use frame_election_provider_support::onchain; use frame_support::{ assert_ok, parameter_types, traits::{Currency, FindAuthor, Get, OnInitialize, OneSessionHandler}, @@ -33,7 +34,6 @@ use sp_runtime::{ }; use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; use std::{cell::RefCell, collections::HashSet}; -use frame_election_provider_support::onchain; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -54,16 +54,19 @@ impl OneSessionHandler for OtherSessionHandler { type Key = UintAuthorityId; fn on_genesis_session<'a, I: 'a>(_: I) - where I: Iterator, AccountId: 'a {} + where + I: Iterator, + AccountId: 'a, + { + } - fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I,) - where I: Iterator, AccountId: 'a + fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I) + where + I: Iterator, + AccountId: 'a, { SESSION.with(|x| { - *x.borrow_mut() = ( - validators.map(|x| x.0.clone()).collect(), - HashSet::new(), - ) + *x.borrow_mut() = (validators.map(|x| x.0.clone()).collect(), HashSet::new()) }); } @@ -107,7 +110,8 @@ frame_support::construct_runtime!( pub struct Author11; impl FindAuthor for Author11 { fn find_author<'a, I>(_digests: I) -> Option - where I: 'a + IntoIterator, + where + I: 'a + IntoIterator, { Some(11) } @@ -376,21 +380,14 @@ impl ExtBuilder { } fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); - let mut storage = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - let balance_factor = if ExistentialDeposit::get() > 1 { - 256 - } else { - 1 - }; + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let balance_factor = if ExistentialDeposit::get() > 1 { 256 } else { 1 }; let num_validators = self.num_validators.unwrap_or(self.validator_count); // Check that the number of validators is sensible. assert!(num_validators <= 8); - let validators = (0..num_validators) - .map(|x| ((x + 1) * 10 + 1) as AccountId) - .collect::>(); + let validators = + (0..num_validators).map(|x| ((x + 1) * 10 + 1) as AccountId).collect::>(); let _ = pallet_balances::GenesisConfig:: { balances: vec![ @@ -419,7 +416,8 @@ impl ExtBuilder { // This allows us to have a total_payout different from 0. 
(999, 1_000_000_000_000), ], - }.assimilate_storage(&mut storage); + } + .assimilate_storage(&mut storage); let mut stakers = vec![]; if self.has_stakers { @@ -438,11 +436,11 @@ impl ExtBuilder { (31, 30, stake_31, StakerStatus::::Validator), (41, 40, balance_factor * 1000, status_41), // nominator - (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)) + (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)), ]; } - let _ = staking::GenesisConfig::{ - stakers: stakers, + let _ = staking::GenesisConfig:: { + stakers, validator_count: self.validator_count, minimum_validator_count: self.minimum_validator_count, invulnerables: self.invulnerables, @@ -454,12 +452,12 @@ impl ExtBuilder { .assimilate_storage(&mut storage); let _ = pallet_session::GenesisConfig:: { - keys: validators.iter().map(|x| ( - *x, - *x, - SessionKeys { other: UintAuthorityId(*x as u64) } - )).collect(), - }.assimilate_storage(&mut storage); + keys: validators + .iter() + .map(|x| (*x, *x, SessionKeys { other: UintAuthorityId(*x as u64) })) + .collect(), + } + .assimilate_storage(&mut storage); let mut ext = sp_io::TestExternalities::from(storage); ext.execute_with(|| { @@ -524,42 +522,46 @@ fn check_nominators() { // in if the nomination was submitted before the current era. let era = active_era(); >::iter() - .filter_map(|(nominator, nomination)| - if nomination.submitted_in > era { - Some(nominator) - } else { - None - }) + .filter_map( + |(nominator, nomination)| { + if nomination.submitted_in > era { + Some(nominator) + } else { + None + } + }, + ) .for_each(|nominator| { - // must be bonded. - assert_is_stash(nominator); - let mut sum = 0; - Session::validators() - .iter() - .map(|v| Staking::eras_stakers(era, v)) - .for_each(|e| { - let individual = e.others.iter().filter(|e| e.who == nominator).collect::>(); - let len = individual.len(); - match len { - 0 => { /* not supporting this validator at all. */ }, - 1 => sum += individual[0].value, - _ => panic!("nominator cannot back a validator more than once."), - }; - }); - - let nominator_stake = Staking::slashable_balance_of(&nominator); - // a nominator cannot over-spend. - assert!( - nominator_stake >= sum, - "failed: Nominator({}) stake({}) >= sum divided({})", - nominator, - nominator_stake, - sum, - ); + // must be bonded. + assert_is_stash(nominator); + let mut sum = 0; + Session::validators() + .iter() + .map(|v| Staking::eras_stakers(era, v)) + .for_each(|e| { + let individual = + e.others.iter().filter(|e| e.who == nominator).collect::>(); + let len = individual.len(); + match len { + 0 => { /* not supporting this validator at all. */ }, + 1 => sum += individual[0].value, + _ => panic!("nominator cannot back a validator more than once."), + }; + }); + + let nominator_stake = Staking::slashable_balance_of(&nominator); + // a nominator cannot over-spend. + assert!( + nominator_stake >= sum, + "failed: Nominator({}) stake({}) >= sum divided({})", + nominator, + nominator_stake, + sum, + ); - let diff = nominator_stake - sum; - assert!(diff < 100); - }); + let diff = nominator_stake - sum; + assert!(diff < 100); + }); } fn assert_is_stash(acc: AccountId) { @@ -569,10 +571,7 @@ fn assert_is_stash(acc: AccountId) { fn assert_ledger_consistent(ctrl: AccountId) { // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
let ledger = Staking::ledger(ctrl).expect("Not a controller."); - let real_total: Balance = ledger - .unlocking - .iter() - .fold(ledger.active, |a, c| a + c.value); + let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); assert_eq!(real_total, ledger.total); assert!( ledger.active >= Balances::minimum_balance() || ledger.active == 0, @@ -594,16 +593,8 @@ pub(crate) fn current_era() -> EraIndex { pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); - assert_ok!(Staking::validate( - Origin::signed(ctrl), - ValidatorPrefs::default() - )); + assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller,)); + assert_ok!(Staking::validate(Origin::signed(ctrl), ValidatorPrefs::default())); } pub(crate) fn bond_nominator( @@ -614,12 +605,7 @@ pub(crate) fn bond_nominator( ) { let _ = Balances::make_free_balance_be(&stash, val); let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); + assert_ok!(Staking::bond(Origin::signed(stash), ctrl, val, RewardDestination::Controller,)); assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } @@ -715,9 +701,7 @@ pub(crate) fn reward_time_per_era() -> u64 { } pub(crate) fn reward_all_elected() { - let rewards = ::SessionInterface::validators() - .into_iter() - .map(|v| (v, 1)); + let rewards = ::SessionInterface::validators().into_iter().map(|v| (v, 1)); >::reward_by_ids(rewards) } @@ -741,26 +725,28 @@ pub(crate) fn on_offence_in_era( for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { let _ = Staking::on_offence(offenders, slash_fraction, start_session); - return; + return } else if bonded_era > era { - break; + break } } if Staking::active_era().unwrap().index == era { - let _ = - Staking::on_offence( - offenders, - slash_fraction, - Staking::eras_start_session_index(era).unwrap() - ); + let _ = Staking::on_offence( + offenders, + slash_fraction, + Staking::eras_start_session_index(era).unwrap(), + ); } else { panic!("cannot slash in era {}", era); } } pub(crate) fn on_offence_now( - offenders: &[OffenceDetails>], + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], slash_fraction: &[Perbill], ) { let now = Staking::active_era().unwrap().index; @@ -769,29 +755,26 @@ pub(crate) fn on_offence_now( pub(crate) fn add_slash(who: &AccountId) { on_offence_now( - &[ - OffenceDetails { - offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); } /// Make all validator and nominator request their payment pub(crate) fn make_all_reward_payment(era: EraIndex) { - let validators_with_reward = - ErasRewardPoints::::get(era).individual.keys().cloned().collect::>(); + let validators_with_reward = ErasRewardPoints::::get(era) + .individual + .keys() + .cloned() + .collect::>(); // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { let ledger = >::get(&validator_controller).unwrap(); - assert_ok!(Staking::payout_stakers( - 
Origin::signed(1337), - ledger.stash, - era - )); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); } } @@ -816,13 +799,11 @@ macro_rules! assert_session_era { } pub(crate) fn staking_events() -> Vec> { - System::events().into_iter().map(|r| r.event).filter_map(|e| { - if let Event::Staking(inner) = e { - Some(inner) - } else { - None - } - }).collect() + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::Staking(inner) = e { Some(inner) } else { None }) + .collect() } pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) { diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 1e959e9341add5d726da786667ee1cb7c508bd76..227043b656eef12a93cf5354689d4b15fa148d7d 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,16 +50,19 @@ //! Based on research at use super::{ - EraIndex, Config, Pallet, Store, BalanceOf, Exposure, Perbill, SessionInterface, - NegativeImbalanceOf, UnappliedSlash, Error, + BalanceOf, Config, EraIndex, Error, Exposure, NegativeImbalanceOf, Pallet, Perbill, + SessionInterface, Store, UnappliedSlash, }; -use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug, DispatchResult}; +use codec::{Decode, Encode}; use frame_support::{ ensure, - traits::{Currency, OnUnbalanced, Imbalance}, + traits::{Currency, Imbalance, OnUnbalanced}, +}; +use sp_runtime::{ + traits::{Saturating, Zero}, + DispatchResult, RuntimeDebug, }; use sp_std::vec::Vec; -use codec::{Encode, Decode}; /// The proportion of the slashing reward to be paid out on the first slashing detection. /// This is f_1 in the paper. @@ -118,7 +121,9 @@ impl SlashingSpans { // that internal state is unchanged. pub(crate) fn end_span(&mut self, now: EraIndex) -> bool { let next_start = now + 1; - if next_start <= self.last_start { return false } + if next_start <= self.last_start { + return false + } let last_length = next_start - self.last_start; self.prior.insert(0, last_length); @@ -153,7 +158,8 @@ impl SlashingSpans { // If this returns `Some`, then it includes a range start..end of all the span // indices which were pruned. fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> { - let old_idx = self.iter() + let old_idx = self + .iter() .skip(1) // skip ongoing span. .position(|span| span.length.map_or(false, |len| span.start + len <= window_start)); @@ -163,7 +169,7 @@ impl SlashingSpans { self.prior.truncate(o); let new_earliest = self.span_index - self.prior.len() as SpanIndex; Some((earliest_span_index, new_earliest)) - } + }, None => None, }; @@ -214,18 +220,11 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { /// /// The pending slash record returned does not have initialized reporters. Those have /// to be set at a higher level, if any. -pub(crate) fn compute_slash(params: SlashParams) - -> Option>> -{ - let SlashParams { - stash, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params.clone(); +pub(crate) fn compute_slash( + params: SlashParams, +) -> Option>> { + let SlashParams { stash, slash, exposure, slash_era, window_start, now, reward_proportion } = + params.clone(); let mut reward_payout = Zero::zero(); let mut val_slashed = Zero::zero(); @@ -236,22 +235,17 @@ pub(crate) fn compute_slash(params: SlashParams) // kick out the validator even if they won't be slashed, // as long as the misbehavior is from their most recent slashing span. 
kick_out_if_recent::(params); - return None; + return None } - let (prior_slash_p, _era_slash) = as Store>::ValidatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or((Perbill::zero(), Zero::zero())); + let (prior_slash_p, _era_slash) = + as Store>::ValidatorSlashInEra::get(&slash_era, stash) + .unwrap_or((Perbill::zero(), Zero::zero())); // compare slash proportions rather than slash values to avoid issues due to rounding // error. if slash.deconstruct() > prior_slash_p.deconstruct() { - as Store>::ValidatorSlashInEra::insert( - &slash_era, - stash, - &(slash, own_slash), - ); + as Store>::ValidatorSlashInEra::insert(&slash_era, stash, &(slash, own_slash)); } else { // we slash based on the max in era - this new event is not the max, // so neither the validator or any nominators will need an update. @@ -260,7 +254,7 @@ pub(crate) fn compute_slash(params: SlashParams) // pays out some reward even if the latest report is not max-in-era. // we opt to avoid the nominator lookups and edits and leave more rewards // for more drastic misbehavior. - return None; + return None } // apply slash to validator. @@ -273,10 +267,7 @@ pub(crate) fn compute_slash(params: SlashParams) reward_proportion, ); - let target_span = spans.compare_and_update_span_slash( - slash_era, - own_slash, - ); + let target_span = spans.compare_and_update_span_slash(slash_era, own_slash); if target_span == Some(spans.span_index()) { // misbehavior occurred within the current slashing span - take appropriate @@ -309,9 +300,7 @@ pub(crate) fn compute_slash(params: SlashParams) // doesn't apply any slash, but kicks out the validator if the misbehavior is from // the most recent slashing span. -fn kick_out_if_recent( - params: SlashParams, -) { +fn kick_out_if_recent(params: SlashParams) { // these are not updated by era-span or end-span. let mut reward_payout = Zero::zero(); let mut val_slashed = Zero::zero(); @@ -343,15 +332,8 @@ fn slash_nominators( prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, ) -> BalanceOf { - let SlashParams { - stash: _, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params; + let SlashParams { stash: _, slash, exposure, slash_era, window_start, now, reward_proportion } = + params; let mut reward_payout = Zero::zero(); @@ -367,18 +349,12 @@ fn slash_nominators( let own_slash_by_validator = slash * nominator.value; let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); - let mut era_slash = as Store>::NominatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or_else(|| Zero::zero()); + let mut era_slash = as Store>::NominatorSlashInEra::get(&slash_era, stash) + .unwrap_or_else(|| Zero::zero()); era_slash += own_slash_difference; - as Store>::NominatorSlashInEra::insert( - &slash_era, - stash, - &era_slash, - ); + as Store>::NominatorSlashInEra::insert(&slash_era, stash, &era_slash); era_slash }; @@ -393,10 +369,7 @@ fn slash_nominators( reward_proportion, ); - let target_span = spans.compare_and_update_span_slash( - slash_era, - era_slash, - ); + let target_span = spans.compare_and_update_span_slash(slash_era, era_slash); if target_span == Some(spans.span_index()) { // End the span, but don't chill the nominator. its nomination @@ -497,8 +470,8 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { span_record.slashed = slash; // compute reward. 
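// Illustrative arithmetic (a sketch; assumes `REWARD_F1` is the 50% first-detection factor
// named f_1 above): with `reward_proportion = 10%`, `slash = 1_000` and
// `span_record.paid_out = 20`, the marginal payout below is
// `50% * (10% * 1_000 - 20) = 50% * 80 = 40`.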
- let reward = REWARD_F1 - * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); + let reward = + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); self.add_slash(difference, slash_era); changed = true; @@ -529,7 +502,9 @@ impl<'a, T: 'a + Config> InspectingSpans<'a, T> { impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> { fn drop(&mut self) { // only update on disk if we slashed this account. - if !self.dirty { return } + if !self.dirty { + return + } if let Some((start, end)) = self.spans.prune(self.window_start) { for span_index in start..end { @@ -557,7 +532,10 @@ pub(crate) fn clear_stash_metadata( Some(s) => s, }; - ensure!(num_slashing_spans as usize >= spans.iter().count(), Error::::IncorrectSlashingSpans); + ensure!( + num_slashing_spans as usize >= spans.iter().count(), + Error::::IncorrectSlashingSpans + ); as Store>::SlashingSpans::remove(stash); @@ -606,9 +584,7 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event( - super::Event::::Slash(stash.clone(), value) - ); + >::deposit_event(super::Event::::Slash(stash.clone(), value)); } } @@ -625,18 +601,12 @@ pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash( - &nominator, - nominator_slash, - &mut reward_payout, - &mut slashed_imbalance, - ); + do_slash::(&nominator, nominator_slash, &mut reward_payout, &mut slashed_imbalance); } pay_reporters::(reward_payout, slashed_imbalance, &unapplied_slash.reporters); } - /// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance. fn pay_reporters( reward_payout: BalanceOf, @@ -774,17 +744,13 @@ mod tests { assert_eq!(spans.prune(1000), Some((8, 10))); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 1000, length: None },], ); assert_eq!(spans.prune(2000), None); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 2000, length: None },], ); // now all in one shot. @@ -797,9 +763,7 @@ mod tests { assert_eq!(spans.prune(2000), Some((6, 10))); assert_eq!( spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], + vec![SlashingSpan { index: 10, start: 2000, length: None },], ); } diff --git a/substrate/frame/staking/src/testing_utils.rs b/substrate/frame/staking/src/testing_utils.rs index 18b77d59b3e2e7388466d9e26f9f2481e2ea5dd5..0d9ae2c8e41a4f598603b4ae0f1c1207133eb86b 100644 --- a/substrate/frame/staking/src/testing_utils.rs +++ b/substrate/frame/staking/src/testing_utils.rs @@ -18,12 +18,14 @@ //! Testing utils for staking. Provides some common functions to setup staking state, such as //! bonding validators, nominators, and generating different types of solutions. 
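// A minimal usage sketch (hedged; generic over some runtime `T: Config`, as in the helpers
// below): a benchmark typically funds and bonds a stash/controller pair, then registers the
// controller as a validator:
//   let (stash, controller) =
//       create_stash_controller::<T>(0, 100, RewardDestination::Staked)?;
//   Staking::<T>::validate(RawOrigin::Signed(controller).into(), ValidatorPrefs::default())?;
// Note that `balance_factor = 100` bonds ten times the minimum balance, since the helpers
// bond `minimum_balance * (balance_factor / 10).max(1)`.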
-use crate::*; -use crate::Pallet as Staking; +use crate::{Pallet as Staking, *}; use frame_benchmarking::account; use frame_system::RawOrigin; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; use sp_io::hashing::blake2_256; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; const SEED: u32 = 0; @@ -54,14 +56,18 @@ pub fn create_stash_controller( n: u32, balance_factor: u32, destination: RewardDestination, -) - -> Result<(T::AccountId, T::AccountId), &'static str> -{ +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); let controller = create_funded_user::("controller", n, balance_factor); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + amount, + destination, + )?; return Ok((stash, controller)) } @@ -71,15 +77,19 @@ pub fn create_stash_and_dead_controller( n: u32, balance_factor: u32, destination: RewardDestination, -) - -> Result<(T::AccountId, T::AccountId), &'static str> -{ +) -> Result<(T::AccountId, T::AccountId), &'static str> { let stash = create_funded_user::("stash", n, balance_factor); // controller has no funds let controller = create_funded_user::("controller", n, 0); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); let amount = T::Currency::minimum_balance() * (balance_factor / 10).max(1).into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, destination)?; + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + amount, + destination, + )?; return Ok((stash, controller)) } @@ -89,12 +99,11 @@ pub fn create_validators( balance_factor: u32, ) -> Result::Source>, &'static str> { let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); - for i in 0 .. max { - let (stash, controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - .. Default::default() - }; + for i in 0..max { + let (stash, controller) = + create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; let stash_lookup: ::Source = T::Lookup::unlookup(stash); validators.push(stash_lookup); @@ -126,20 +135,20 @@ pub fn create_validators_with_nominators_for_era( ) -> Result::Source>, &'static str> { clear_validators_and_nominators::(); - let mut validators_stash: Vec<::Source> - = Vec::with_capacity(validators as usize); + let mut validators_stash: Vec<::Source> = + Vec::with_capacity(validators as usize); let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); // Create validators - for i in 0 .. 
validators { + for i in 0..validators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; - let (v_stash, v_controller) = create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - .. Default::default() - }; + let (v_stash, v_controller) = + create_stash_controller::(i, balance_factor, RewardDestination::Staked)?; + let validator_prefs = + ValidatorPrefs { commission: Perbill::from_percent(50), ..Default::default() }; Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + let stash_lookup: ::Source = + T::Lookup::unlookup(v_stash.clone()); validators_stash.push(stash_lookup.clone()); } @@ -147,25 +156,25 @@ pub fn create_validators_with_nominators_for_era( let validator_chosen = validators_stash[0..to_nominate].to_vec(); // Create nominators - for j in 0 .. nominators { + for j in 0..nominators { let balance_factor = if randomize_stake { rng.next_u32() % 255 + 10 } else { 100u32 }; - let (_n_stash, n_controller) = create_stash_controller::( - u32::MAX - j, - balance_factor, - RewardDestination::Staked, - )?; + let (_n_stash, n_controller) = + create_stash_controller::(u32::MAX - j, balance_factor, RewardDestination::Staked)?; // Have them randomly validate let mut available_validators = validator_chosen.clone(); let mut selected_validators: Vec<::Source> = Vec::with_capacity(edge_per_nominator); - for _ in 0 .. validators.min(edge_per_nominator as u32) { + for _ in 0..validators.min(edge_per_nominator as u32) { let selected = rng.next_u32() as usize % available_validators.len(); let validator = available_validators.remove(selected); selected_validators.push(validator); } - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), selected_validators)?; + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + selected_validators, + )?; } ValidatorCount::::put(validators); diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index bbb0d5522fcc6ef2ed27d241f716281989cd8df8..9aae4cb15768d5dee51bb765b7bc0daaaa9e4597 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -17,21 +17,21 @@ //! Tests for the module. -use super::{*, Event}; +use super::{Event, *}; +use frame_election_provider_support::Support; +use frame_support::{ + assert_noop, assert_ok, + traits::{Currency, OnInitialize, ReservableCurrency}, + weights::{extract_actual_weight, GetDispatchInfo}, +}; use mock::*; +use pallet_balances::Error as BalancesError; use sp_runtime::{ assert_eq_error_rate, traits::{BadOrigin, Dispatchable}, }; use sp_staking::offence::OffenceDetails; -use frame_support::{ - assert_ok, assert_noop, - traits::{Currency, ReservableCurrency, OnInitialize}, - weights::{extract_actual_weight, GetDispatchInfo}, -}; -use pallet_balances::Error as BalancesError; use substrate_test_utils::assert_eq_uvec; -use frame_election_provider_support::Support; #[test] fn force_unstake_works() { @@ -48,7 +48,10 @@ fn force_unstake_works() { // Force unstake requires root. 
 		assert_noop!(Staking::force_unstake(Origin::signed(11), 11, 2), BadOrigin);
 		// Force unstake needs correct number of slashing spans (for weight calculation)
-		assert_noop!(Staking::force_unstake(Origin::root(), 11, 0), Error::<Test>::IncorrectSlashingSpans);
+		assert_noop!(
+			Staking::force_unstake(Origin::root(), 11, 0),
+			Error::<Test>::IncorrectSlashingSpans
+		);
 		// We now force them to unstake
 		assert_ok!(Staking::force_unstake(Origin::root(), 11, 2));
 		// No longer bonded.
@@ -90,26 +93,47 @@ fn basic_setup_works() {
 		// Account 10 controls the stash from account 11, which is 100 * balance_factor units
 		assert_eq!(
 			Staking::ledger(&10),
-			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] })
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![]
+			})
 		);
 		// Account 20 controls the stash from account 21, which is 200 * balance_factor units
 		assert_eq!(
 			Staking::ledger(&20),
-			Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] })
+			Some(StakingLedger {
+				stash: 21,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![]
+			})
 		);
 		// Account 1 does not control any stash
 		assert_eq!(Staking::ledger(&1), None);
 
 		// ValidatorPrefs are default
-		assert_eq_uvec!(<Validators<Test>>::iter().collect::<Vec<_>>(), vec![
-			(31, ValidatorPrefs::default()),
-			(21, ValidatorPrefs::default()),
-			(11, ValidatorPrefs::default())
-		]);
+		assert_eq_uvec!(
+			<Validators<Test>>::iter().collect::<Vec<_>>(),
+			vec![
+				(31, ValidatorPrefs::default()),
+				(21, ValidatorPrefs::default()),
+				(11, ValidatorPrefs::default())
+			]
+		);
 
 		assert_eq!(
 			Staking::ledger(100),
-			Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], claimed_rewards: vec![] })
+			Some(StakingLedger {
+				stash: 101,
+				total: 500,
+				active: 500,
+				unlocking: vec![],
+				claimed_rewards: vec![]
+			})
 		);
 		assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]);
 
@@ -118,7 +142,7 @@ fn basic_setup_works() {
 			Exposure {
 				total: 1125,
 				own: 1000,
-				others: vec![ IndividualExposure { who: 101, value: 125 }]
+				others: vec![IndividualExposure { who: 101, value: 125 }]
 			},
 		);
 		assert_eq!(
@@ -126,14 +150,13 @@ fn basic_setup_works() {
 			Exposure {
 				total: 1375,
 				own: 1000,
-				others: vec![ IndividualExposure { who: 101, value: 375 }]
+				others: vec![IndividualExposure { who: 101, value: 375 }]
 			},
 		);
 
 		// initial total stake = 1125 + 1375
 		assert_eq!(Staking::eras_total_stake(Staking::active_era().unwrap().index), 2500);
-
 		// The number of validators required.
 		assert_eq!(Staking::validator_count(), 2);
 
@@ -245,9 +268,9 @@ fn rewards_should_work() {
 		assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2,);
 		assert_eq_error_rate!(
 			Balances::total_balance(&100),
-			init_balance_100
-				+ part_for_100_from_10 * total_payout_0 * 2/3
-				+ part_for_100_from_20 * total_payout_0 * 1/3,
+			init_balance_100 +
+				part_for_100_from_10 * total_payout_0 * 2 / 3 +
+				part_for_100_from_20 * total_payout_0 * 1 / 3,
 			2
 		);
 		assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2);
@@ -283,9 +306,9 @@ fn rewards_should_work() {
 		assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2,);
 		assert_eq_error_rate!(
 			Balances::total_balance(&100),
-			init_balance_100
-				+ part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1)
-				+ part_for_100_from_20 * total_payout_0 * 1/3,
+			init_balance_100 +
+				part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) +
+				part_for_100_from_20 * total_payout_0 * 1 / 3,
 			2
 		);
 		assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2);
@@ -302,7 +325,9 @@ fn staking_should_work() {
 		assert_eq_uvec!(validator_controllers(), vec![20, 10]);
 
 		// put some money in account that we'll use.
-		for i in 1..5 { let _ = Balances::make_free_balance_be(&i, 2000); }
+		for i in 1..5 {
+			let _ = Balances::make_free_balance_be(&i, 2000);
+		}
 
 		// --- Block 2:
 		start_session(2);
@@ -319,7 +344,6 @@ fn staking_should_work() {
 		// No effects will be seen so far. Era has not been yet triggered.
 		assert_eq_uvec!(validator_controllers(), vec![20, 10]);
 
-
 		// --- Block 4: the validators will now be queued.
 		start_session(4);
 		assert_eq!(Staking::active_era().unwrap().index, 1);
@@ -375,7 +399,10 @@ fn blocking_and_kicking_works() {
 		.num_validators(3)
 		.build_and_execute(|| {
 			// block validator 10/11
-			assert_ok!(Staking::validate(Origin::signed(10), ValidatorPrefs { blocked: true, .. Default::default() }));
+			assert_ok!(Staking::validate(
+				Origin::signed(10),
+				ValidatorPrefs { blocked: true, ..Default::default() }
+			));
 			// attempt to nominate from 100/101...
 			assert_ok!(Staking::nominate(Origin::signed(100), vec![11]));
 			// should have worked since we're already nominated them
@@ -385,7 +412,10 @@ fn blocking_and_kicking_works() {
 			// should have been kicked now
 			assert!(Nominators::<Test>::get(&101).unwrap().targets.is_empty());
 			// attempt to nominate from 100/101...
-			assert_noop!(Staking::nominate(Origin::signed(100), vec![11]), Error::<Test>::BadTarget);
+			assert_noop!(
+				Staking::nominate(Origin::signed(100), vec![11]),
+				Error::<Test>::BadTarget
+			);
 		});
 }
 
@@ -408,10 +438,8 @@ fn less_than_needed_candidates_works() {
 
 			// But the exposure is updated in a simple way. No external votes exists.
 			// This is purely self-vote.
-			assert!(
-				ErasStakers::<Test>::iter_prefix_values(Staking::active_era().unwrap().index)
-					.all(|exposure| exposure.others.is_empty())
-			);
+			assert!(ErasStakers::<Test>::iter_prefix_values(Staking::active_era().unwrap().index)
+				.all(|exposure| exposure.others.is_empty()));
 		});
 }
 
@@ -426,7 +454,7 @@ fn no_candidate_emergency_condition() {
 		.build_and_execute(|| {
 			// initial validators
 			assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]);
-			let prefs = ValidatorPrefs { commission: Perbill::one(), .. Default::default() };
+			let prefs = ValidatorPrefs { commission: Perbill::one(), ..Default::default() };
 			<Staking as crate::Store>::Validators::insert(11, prefs.clone());
 
 			// set the minimum validator count.
@@ -440,10 +468,7 @@ fn no_candidate_emergency_condition() {
 
 			// try trigger new era
 			mock::run_to_block(20);
-			assert_eq!(
-				*staking_events().last().unwrap(),
-				Event::StakingElectionFailed,
-			);
+			assert_eq!(*staking_events().last().unwrap(), Event::StakingElectionFailed,);
 
 			// No new era is created
 			assert_eq!(current_era, CurrentEra::<Test>::get());
@@ -506,7 +531,11 @@ fn nominating_and_rewards_should_work() {
 		// ------ check the staked value of all parties.
 
 		// 30 and 40 are not chosen anymore
-		assert_eq!(ErasStakers::<Test>::iter_prefix_values(Staking::active_era().unwrap().index).count(), 2);
+		assert_eq!(
+			ErasStakers::<Test>::iter_prefix_values(Staking::active_era().unwrap().index)
+				.count(),
+			2
+		);
 		assert_eq!(
 			Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
 			Exposure {
@@ -578,10 +607,7 @@ fn nominators_also_get_slashed_pro_rata() {
 		let slash_percent = Perbill::from_percent(5);
 		let initial_exposure = Staking::eras_stakers(active_era(), 11);
 		// 101 is a nominator for 11
-		assert_eq!(
-			initial_exposure.others.first().unwrap().who,
-			101,
-		);
+		assert_eq!(initial_exposure.others.first().unwrap().who, 101,);
 
 		// staked values;
 		let nominator_stake = Staking::ledger(100).unwrap().active;
@@ -594,13 +620,7 @@ fn nominators_also_get_slashed_pro_rata() {
 
 		// 11 goes offline
 		on_offence_now(
-			&[OffenceDetails {
-				offender: (
-					11,
-					initial_exposure.clone(),
-				),
-				reporters: vec![],
-			}],
+			&[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }],
 			&[slash_percent],
 		);
 
@@ -611,24 +631,16 @@ fn nominators_also_get_slashed_pro_rata() {
 		let slash_amount = slash_percent * exposed_stake;
 		let validator_share =
 			Perbill::from_rational(exposed_validator, exposed_stake) * slash_amount;
-		let nominator_share = Perbill::from_rational(
-			exposed_nominator,
-			exposed_stake,
-		) * slash_amount;
+		let nominator_share =
+			Perbill::from_rational(exposed_nominator, exposed_stake) * slash_amount;
 
 		// both slash amounts need to be positive for the test to make sense.
 		assert!(validator_share > 0);
 		assert!(nominator_share > 0);
 
 		// both stakes must have been decreased pro-rata.
-		assert_eq!(
-			Staking::ledger(100).unwrap().active,
-			nominator_stake - nominator_share,
-		);
-		assert_eq!(
-			Staking::ledger(10).unwrap().active,
-			validator_stake - validator_share,
-		);
+		assert_eq!(Staking::ledger(100).unwrap().active, nominator_stake - nominator_share,);
+		assert_eq!(Staking::ledger(10).unwrap().active, validator_stake - validator_share,);
 		assert_eq!(
 			balances(&101).0, // free balance
 			nominator_balance - nominator_share,
@@ -651,14 +663,16 @@ fn double_staking_should_fail() {
 	ExtBuilder::default().build_and_execute(|| {
 		let arbitrary_value = 5;
 		// 2 = controller, 1 stashed => ok
-		assert_ok!(
-			Staking::bond(Origin::signed(1), 2, arbitrary_value,
-			RewardDestination::default())
-		);
+		assert_ok!(Staking::bond(
+			Origin::signed(1),
+			2,
+			arbitrary_value,
+			RewardDestination::default()
+		));
 		// 4 = not used so far, 1 stashed => not allowed.
 		assert_noop!(
-			Staking::bond(Origin::signed(1), 4, arbitrary_value,
-			RewardDestination::default()), Error::<Test>::AlreadyBonded,
+			Staking::bond(Origin::signed(1), 4, arbitrary_value, RewardDestination::default()),
+			Error::<Test>::AlreadyBonded,
 		);
 		// 1 = stashed => attempting to nominate should fail.
 		assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), Error::<Test>::NotController);
@@ -833,7 +847,6 @@ fn forcing_new_era_works() {
 
 		start_session(15);
 		assert_eq!(active_era(), 6);
-
 	});
 }
 
@@ -892,10 +905,7 @@ fn cannot_reserve_staked_balance() {
 		// Confirm account 11 (via controller 10) is totally staked
 		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000);
 		// Confirm account 11 cannot reserve as a result
-		assert_noop!(
-			Balances::reserve(&11, 1),
-			BalancesError::<Test, _>::LiquidityRestrictions,
-		);
+		assert_noop!(Balances::reserve(&11, 1), BalancesError::<Test, _>::LiquidityRestrictions,);
 
 		// Give account 11 extra free balance
 		let _ = Balances::make_free_balance_be(&11, 10000);
@@ -915,13 +925,16 @@ fn reward_destination_works() {
 		// Check the balance of the stash account
 		assert_eq!(Balances::free_balance(11), 1000);
 		// Check how much is at stake
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000,
-			active: 1000,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
 		// Compute total payout now for whole duration as other parameter won't change
 		let total_payout_0 = current_total_payout_for_duration(reward_time_per_era());
@@ -935,15 +948,18 @@ fn reward_destination_works() {
 		// Check that reward went to the stash account of validator
 		assert_eq!(Balances::free_balance(11), 1000 + total_payout_0);
 		// Check that amount at stake increased accordingly
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + total_payout_0,
-			active: 1000 + total_payout_0,
-			unlocking: vec![],
-			claimed_rewards: vec![0],
-		}));
-
-		//Change RewardDestination to Stash
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + total_payout_0,
+				active: 1000 + total_payout_0,
+				unlocking: vec![],
+				claimed_rewards: vec![0],
+			})
+		);
+
+		// Change RewardDestination to Stash
 		<Payee<Test>>::insert(&11, RewardDestination::Stash);
 
 		// Compute total payout now for whole duration as other parameter won't change
@@ -960,13 +976,16 @@ fn reward_destination_works() {
 		// Record this value
 		let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1;
 		// Check that amount at stake is NOT increased
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + total_payout_0,
-			active: 1000 + total_payout_0,
-			unlocking: vec![],
-			claimed_rewards: vec![0,1],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + total_payout_0,
+				active: 1000 + total_payout_0,
+				unlocking: vec![],
+				claimed_rewards: vec![0, 1],
+			})
+		);
 
 		// Change RewardDestination to Controller
 		<Payee<Test>>::insert(&11, RewardDestination::Controller);
@@ -986,13 +1005,16 @@ fn reward_destination_works() {
 		// Check that reward went to the controller account
 		assert_eq!(Balances::free_balance(10), 1 + total_payout_2);
 		// Check that amount at stake is NOT increased
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + total_payout_0,
-			active: 1000 + total_payout_0,
-			unlocking: vec![],
-			claimed_rewards: vec![0,1,2],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + total_payout_0,
+				active: 1000 + total_payout_0,
+				unlocking: vec![],
+				claimed_rewards: vec![0, 1, 2],
+			})
+		);
 		// Check that amount in staked account is NOT increased.
 		assert_eq!(Balances::free_balance(11), recorded_stash_balance);
 	});
@@ -1005,10 +1027,10 @@ fn validator_payment_prefs_work() {
 	// This test will focus on validator payment.
 	ExtBuilder::default().build_and_execute(|| {
 		let commission = Perbill::from_percent(40);
-		<Validators<Test>>::insert(&11, ValidatorPrefs {
-			commission: commission.clone(),
-			.. Default::default()
-		});
+		<Validators<Test>>::insert(
+			&11,
+			ValidatorPrefs { commission: commission.clone(), ..Default::default() },
+		);
 
 		// Reward controller so staked ratio doesn't change.
 		<Payee<Test>>::insert(&11, RewardDestination::Controller);
@@ -1035,7 +1057,6 @@ fn validator_payment_prefs_work() {
 		assert_eq_error_rate!(Balances::total_balance(&10), balance_era_1_10 + reward_of_10, 2);
 		assert_eq_error_rate!(Balances::total_balance(&100), balance_era_1_100 + reward_of_100, 2);
 	});
-
 }
 
 #[test]
@@ -1049,13 +1070,16 @@ fn bond_extra_works() {
 		// Check that account 10 is bonded to account 11
 		assert_eq!(Staking::bonded(&11), Some(10));
 		// Check how much is at stake
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000,
-			active: 1000,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
 		// Give account 11 some large free balance greater than total
 		let _ = Balances::make_free_balance_be(&11, 1000000);
@@ -1063,24 +1087,30 @@ fn bond_extra_works() {
 		// Call the bond_extra function from controller, add only 100
 		assert_ok!(Staking::bond_extra(Origin::signed(11), 100));
 		// There should be 100 more `total` and `active` in the ledger
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + 100,
-			active: 1000 + 100,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + 100,
+				active: 1000 + 100,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
 		// Call the bond_extra function with a large number, should handle it
 		assert_ok!(Staking::bond_extra(Origin::signed(11), Balance::max_value()));
 		// The full amount of the funds should now be in the total and active
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000000,
-			active: 1000000,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000000,
+				active: 1000000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 	});
 }
 
@@ -1108,13 +1138,16 @@ fn bond_extra_and_withdraw_unbonded_works() {
 	mock::start_active_era(1);
 
 		// Initial state of 10
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000,
-			active: 1000,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 		assert_eq!(
 			Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
 			Exposure { total: 1000, own: 1000, others: vec![] }
@@ -1123,13 +1156,16 @@ fn bond_extra_and_withdraw_unbonded_works() {
 		// deposit the extra 100 units
 		Staking::bond_extra(Origin::signed(11), 100).unwrap();
 
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + 100,
-			active: 1000 + 100,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + 100,
+				active: 1000 + 100,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 		// Exposure is a snapshot! only updated after the next era update.
 		assert_ne!(
 			Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
@@ -1141,13 +1177,16 @@ fn bond_extra_and_withdraw_unbonded_works() {
 		assert_eq!(Staking::active_era().unwrap().index, 2);
 
 		// ledger should be the same.
-		assert_eq!(Staking::ledger(&10), Some(StakingLedger {
-			stash: 11,
-			total: 1000 + 100,
-			active: 1000 + 100,
-			unlocking: vec![],
-			claimed_rewards: vec![],
-		}));
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000 + 100,
+				active: 1000 + 100,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 		// Exposure is now updated.
 		assert_eq!(
 			Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
@@ -1162,7 +1201,7 @@ fn bond_extra_and_withdraw_unbonded_works() {
 			stash: 11,
 			total: 1000 + 100,
 			active: 100,
-			unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}],
+			unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }],
 			claimed_rewards: vec![]
 		}),
 	);
@@ -1175,7 +1214,7 @@ fn bond_extra_and_withdraw_unbonded_works() {
 			stash: 11,
 			total: 1000 + 100,
 			active: 100,
-			unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}],
+			unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }],
 			claimed_rewards: vec![]
 		}),
 	);
@@ -1191,7 +1230,7 @@ fn bond_extra_and_withdraw_unbonded_works() {
 			stash: 11,
 			total: 1000 + 100,
 			active: 100,
-			unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}],
+			unlocking: vec![UnlockChunk { value: 1000, era: 2 + 3 }],
 			claimed_rewards: vec![]
 		}),
 	);
@@ -1218,7 +1257,7 @@ fn too_many_unbond_calls_should_not_work() {
 	ExtBuilder::default().build_and_execute(|| {
 		// locked at era 0 until 3
-		for _ in 0..MAX_UNLOCKING_CHUNKS-1 {
+		for _ in 0..MAX_UNLOCKING_CHUNKS - 1 {
 			assert_ok!(Staking::unbond(Origin::signed(10), 1));
 		}
 
@@ -1247,247 +1286,229 @@ fn rebond_works() {
 	// * Given an account being bonded [and chosen as a validator](not mandatory)
 	// * it can unbond a portion of its funds from the stash account.
 	// * it can re-bond a portion of the funds scheduled to unlock.
-	ExtBuilder::default()
-		.nominate(false)
-		.build_and_execute(|| {
-			// Set payee to controller. avoids confusion
-			assert_ok!(Staking::set_payee(
-				Origin::signed(10),
-				RewardDestination::Controller
-			));
+	ExtBuilder::default().nominate(false).build_and_execute(|| {
+		// Set payee to controller. avoids confusion
+		assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller));
 
-			// Give account 11 some large free balance greater than total
-			let _ = Balances::make_free_balance_be(&11, 1000000);
+		// Give account 11 some large free balance greater than total
+		let _ = Balances::make_free_balance_be(&11, 1000000);
 
-			// confirm that 10 is a normal validator and gets paid at the end of the era.
-			mock::start_active_era(1);
+		// confirm that 10 is a normal validator and gets paid at the end of the era.
+		mock::start_active_era(1);
 
-			// Initial state of 10
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 1000,
-					unlocking: vec![],
-					claimed_rewards: vec![],
-				})
-			);
+		// Initial state of 10
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
-			mock::start_active_era(2);
-			assert_eq!(Staking::active_era().unwrap().index, 2);
+		mock::start_active_era(2);
+		assert_eq!(Staking::active_era().unwrap().index, 2);
 
-			// Try to rebond some funds. We get an error since no fund is unbonded.
-			assert_noop!(
-				Staking::rebond(Origin::signed(10), 500),
-				Error::<Test>::NoUnlockChunk,
-			);
+		// Try to rebond some funds. We get an error since no fund is unbonded.
+		assert_noop!(Staking::rebond(Origin::signed(10), 500), Error::<Test>::NoUnlockChunk,);
 
-			// Unbond almost all of the funds in stash.
-			Staking::unbond(Origin::signed(10), 900).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 100,
-					unlocking: vec![UnlockChunk {
-						value: 900,
-						era: 2 + 3,
-					}],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond almost all of the funds in stash.
+		Staking::unbond(Origin::signed(10), 900).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 100,
+				unlocking: vec![UnlockChunk { value: 900, era: 2 + 3 }],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Re-bond all the funds unbonded.
-			Staking::rebond(Origin::signed(10), 900).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 1000,
-					unlocking: vec![],
-					claimed_rewards: vec![],
-				})
-			);
+		// Re-bond all the funds unbonded.
+		Staking::rebond(Origin::signed(10), 900).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Unbond almost all of the funds in stash.
-			Staking::unbond(Origin::signed(10), 900).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 100,
-					unlocking: vec![UnlockChunk { value: 900, era: 5 }],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond almost all of the funds in stash.
+		Staking::unbond(Origin::signed(10), 900).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 100,
+				unlocking: vec![UnlockChunk { value: 900, era: 5 }],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Re-bond part of the funds unbonded.
-			Staking::rebond(Origin::signed(10), 500).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 600,
-					unlocking: vec![UnlockChunk { value: 400, era: 5 }],
-					claimed_rewards: vec![],
-				})
-			);
+		// Re-bond part of the funds unbonded.
+		Staking::rebond(Origin::signed(10), 500).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 600,
+				unlocking: vec![UnlockChunk { value: 400, era: 5 }],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Re-bond the remainder of the funds unbonded.
-			Staking::rebond(Origin::signed(10), 500).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 1000,
-					unlocking: vec![],
-					claimed_rewards: vec![],
-				})
-			);
+		// Re-bond the remainder of the funds unbonded.
+		Staking::rebond(Origin::signed(10), 500).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Unbond parts of the funds in stash.
-			Staking::unbond(Origin::signed(10), 300).unwrap();
-			Staking::unbond(Origin::signed(10), 300).unwrap();
-			Staking::unbond(Origin::signed(10), 300).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 100,
-					unlocking: vec![
-						UnlockChunk { value: 300, era: 5 },
-						UnlockChunk { value: 300, era: 5 },
-						UnlockChunk { value: 300, era: 5 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond parts of the funds in stash.
+		Staking::unbond(Origin::signed(10), 300).unwrap();
+		Staking::unbond(Origin::signed(10), 300).unwrap();
+		Staking::unbond(Origin::signed(10), 300).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 100,
+				unlocking: vec![
+					UnlockChunk { value: 300, era: 5 },
+					UnlockChunk { value: 300, era: 5 },
+					UnlockChunk { value: 300, era: 5 },
+				],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Re-bond part of the funds unbonded.
-			Staking::rebond(Origin::signed(10), 500).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 600,
-					unlocking: vec![
-						UnlockChunk { value: 300, era: 5 },
-						UnlockChunk { value: 100, era: 5 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
-		})
+		// Re-bond part of the funds unbonded.
+		Staking::rebond(Origin::signed(10), 500).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 600,
+				unlocking: vec![
+					UnlockChunk { value: 300, era: 5 },
+					UnlockChunk { value: 100, era: 5 },
+				],
+				claimed_rewards: vec![],
+			})
+		);
+	})
 }
 
 #[test]
 fn rebond_is_fifo() {
 	// Rebond should proceed by reversing the most recent bond operations.
-	ExtBuilder::default()
-		.nominate(false)
-		.build_and_execute(|| {
-			// Set payee to controller. avoids confusion
-			assert_ok!(Staking::set_payee(
-				Origin::signed(10),
-				RewardDestination::Controller
-			));
+	ExtBuilder::default().nominate(false).build_and_execute(|| {
+		// Set payee to controller. avoids confusion
+		assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller));
 
-			// Give account 11 some large free balance greater than total
-			let _ = Balances::make_free_balance_be(&11, 1000000);
+		// Give account 11 some large free balance greater than total
+		let _ = Balances::make_free_balance_be(&11, 1000000);
 
-			// confirm that 10 is a normal validator and gets paid at the end of the era.
-			mock::start_active_era(1);
+		// confirm that 10 is a normal validator and gets paid at the end of the era.
+		mock::start_active_era(1);
 
-			// Initial state of 10
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 1000,
-					unlocking: vec![],
-					claimed_rewards: vec![],
-				})
-			);
+		// Initial state of 10
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			})
+		);
 
-			mock::start_active_era(2);
+		mock::start_active_era(2);
 
-			// Unbond some of the funds in stash.
-			Staking::unbond(Origin::signed(10), 400).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 600,
-					unlocking: vec![
-						UnlockChunk { value: 400, era: 2 + 3 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond some of the funds in stash.
+		Staking::unbond(Origin::signed(10), 400).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 600,
+				unlocking: vec![UnlockChunk { value: 400, era: 2 + 3 },],
+				claimed_rewards: vec![],
+			})
+		);
 
-			mock::start_active_era(3);
+		mock::start_active_era(3);
 
-			// Unbond more of the funds in stash.
-			Staking::unbond(Origin::signed(10), 300).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 300,
-					unlocking: vec![
-						UnlockChunk { value: 400, era: 2 + 3 },
-						UnlockChunk { value: 300, era: 3 + 3 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond more of the funds in stash.
+		Staking::unbond(Origin::signed(10), 300).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 300,
+				unlocking: vec![
+					UnlockChunk { value: 400, era: 2 + 3 },
+					UnlockChunk { value: 300, era: 3 + 3 },
+				],
+				claimed_rewards: vec![],
+			})
+		);
 
-			mock::start_active_era(4);
+		mock::start_active_era(4);
 
-			// Unbond yet more of the funds in stash.
-			Staking::unbond(Origin::signed(10), 200).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 100,
-					unlocking: vec![
-						UnlockChunk { value: 400, era: 2 + 3 },
-						UnlockChunk { value: 300, era: 3 + 3 },
-						UnlockChunk { value: 200, era: 4 + 3 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
+		// Unbond yet more of the funds in stash.
+		Staking::unbond(Origin::signed(10), 200).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 100,
+				unlocking: vec![
+					UnlockChunk { value: 400, era: 2 + 3 },
+					UnlockChunk { value: 300, era: 3 + 3 },
+					UnlockChunk { value: 200, era: 4 + 3 },
+				],
+				claimed_rewards: vec![],
+			})
+		);
 
-			// Re-bond half of the unbonding funds.
-			Staking::rebond(Origin::signed(10), 400).unwrap();
-			assert_eq!(
-				Staking::ledger(&10),
-				Some(StakingLedger {
-					stash: 11,
-					total: 1000,
-					active: 500,
-					unlocking: vec![
-						UnlockChunk { value: 400, era: 2 + 3 },
-						UnlockChunk { value: 100, era: 3 + 3 },
-					],
-					claimed_rewards: vec![],
-				})
-			);
-		})
+		// Re-bond half of the unbonding funds.
+		Staking::rebond(Origin::signed(10), 400).unwrap();
+		assert_eq!(
+			Staking::ledger(&10),
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 500,
+				unlocking: vec![
+					UnlockChunk { value: 400, era: 2 + 3 },
+					UnlockChunk { value: 100, era: 3 + 3 },
+				],
+				claimed_rewards: vec![],
+			})
+		);
+	})
 }
 
 #[test]
@@ -1510,7 +1531,16 @@ fn reward_to_stake_works() {
 
 		// Now lets lower account 20 stake
 		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69);
-		<Ledger<Test>>::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], claimed_rewards: vec![] });
+		<Ledger<Test>>::insert(
+			&20,
+			StakingLedger {
+				stash: 21,
+				total: 69,
+				active: 69,
+				unlocking: vec![],
+				claimed_rewards: vec![],
+			},
+		);
 
 		// Compute total payout now for whole duration as other parameter won't change
 		let total_payout_0 = current_total_payout_for_duration(reward_time_per_era());
@@ -1531,8 +1561,14 @@ fn reward_to_stake_works() {
 		mock::start_active_era(2);
 
 		// -- new infos
-		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2);
-		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69 + total_payout_0 / 2);
+		assert_eq!(
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total,
+			1000 + total_payout_0 / 2
+		);
+		assert_eq!(
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total,
+			69 + total_payout_0 / 2
+		);
 	});
 }
 
@@ -1653,18 +1689,21 @@ fn on_free_balance_zero_stash_removes_nominator() {
 	});
 }
 
-
 #[test]
 fn switching_roles() {
 	// Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead.
 	ExtBuilder::default().nominate(false).build_and_execute(|| {
 		// Reset reward destination
-		for i in &[10, 20] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); }
+		for i in &[10, 20] {
+			assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller));
+		}
 
 		assert_eq_uvec!(validator_controllers(), vec![20, 10]);
 
 		// put some money in account that we'll use.
-		for i in 1..7 { let _ = Balances::deposit_creating(&i, 5000); }
+		for i in 1..7 {
+			let _ = Balances::deposit_creating(&i, 5000);
+		}
 
 		// add 2 nominators
 		assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller));
@@ -1699,24 +1738,32 @@ fn switching_roles() {
 
 #[test]
 fn wrong_vote_is_null() {
-	ExtBuilder::default().nominate(false).validator_pool(true).build_and_execute(|| {
-		assert_eq_uvec!(validator_controllers(), vec![40, 30]);
+	ExtBuilder::default()
+		.nominate(false)
+		.validator_pool(true)
+		.build_and_execute(|| {
+			assert_eq_uvec!(validator_controllers(), vec![40, 30]);
 
-		// put some money in account that we'll use.
-		for i in 1..3 { let _ = Balances::deposit_creating(&i, 5000); }
+			// put some money in account that we'll use.
+			for i in 1..3 {
+				let _ = Balances::deposit_creating(&i, 5000);
+			}
 
-		// add 1 nominators
-		assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::default()));
-		assert_ok!(Staking::nominate(Origin::signed(2), vec![
-			11, 21, // good votes
-			1, 2, 15, 1000, 25 // crap votes. No effect.
-		]));
+			// add 1 nominators
+			assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::default()));
+			assert_ok!(Staking::nominate(
+				Origin::signed(2),
+				vec![
+					11, 21, // good votes
+					1, 2, 15, 1000, 25 // crap votes. No effect.
+				]
+			));
 
-		// new block
-		mock::start_active_era(1);
+			// new block
+			mock::start_active_era(1);
 
-		assert_eq_uvec!(validator_controllers(), vec![20, 10]);
-	});
+			assert_eq_uvec!(validator_controllers(), vec![20, 10]);
+		});
 }
 
 #[test]
@@ -1748,7 +1795,7 @@ fn bond_with_no_staked_value() {
 					stash: 1,
 					active: 0,
 					total: 5,
-					unlocking: vec![UnlockChunk {value: 5, era: 3}],
+					unlocking: vec![UnlockChunk { value: 5, era: 3 }],
 					claimed_rewards: vec![],
 				})
 			);
@@ -1800,7 +1847,11 @@ fn bond_with_little_staked_value_bounded() {
 			assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0);
 
 			// Old ones are rewarded.
-			assert_eq_error_rate!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3, 1);
+			assert_eq_error_rate!(
+				Balances::free_balance(10),
+				init_balance_10 + total_payout_0 / 3,
+				1
+			);
 			// no rewards paid to 2. This was initial election.
 			assert_eq!(Balances::free_balance(2), init_balance_2);
 
@@ -1814,7 +1865,11 @@ fn bond_with_little_staked_value_bounded() {
 			assert_eq!(Staking::eras_stakers(active_era(), 2).total, 0);
 
 			// 2 is now rewarded.
-			assert_eq_error_rate!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3, 1);
+			assert_eq_error_rate!(
+				Balances::free_balance(2),
+				init_balance_2 + total_payout_1 / 3,
+				1
+			);
 			assert_eq_error_rate!(
 				Balances::free_balance(&10),
 				init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3,
@@ -1893,7 +1948,7 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() {
 
 			// give the man some money.
 			let initial_balance = 1000;
-			for i in [1, 2, 3, 4,].iter() {
+			for i in [1, 2, 3, 4].iter() {
 				let _ = Balances::make_free_balance_be(i, initial_balance);
 			}
 
@@ -1991,20 +2046,22 @@ fn reward_validator_slashing_validator_does_not_overflow() {
 		// it is 0.
 		Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap();
 		// Override exposure of 11
-		ErasStakers::<Test>::insert(0, 11, Exposure {
-			total: stake,
-			own: 1,
-			others: vec![ IndividualExposure { who: 2, value: stake - 1 }]
-		});
+		ErasStakers::<Test>::insert(
+			0,
+			11,
+			Exposure {
+				total: stake,
+				own: 1,
+				others: vec![IndividualExposure { who: 2, value: stake - 1 }],
+			},
+		);
 
 		// Check slashing
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(100)],
 		);
 
@@ -2046,24 +2103,13 @@ fn add_reward_points_fns_works() {
 		// Not mandatory but must be coherent with rewards
 		assert_eq_uvec!(Session::validators(), vec![21, 11]);
 
-		<Pallet<Test>>::reward_by_ids(vec![
-			(21, 1),
-			(11, 1),
-			(11, 1),
-		]);
+		<Pallet<Test>>::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]);
 
-		<Pallet<Test>>::reward_by_ids(vec![
-			(21, 1),
-			(11, 1),
-			(11, 1),
-		]);
+		<Pallet<Test>>::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]);
 
 		assert_eq!(
 			ErasRewardPoints::<Test>::get(Staking::active_era().unwrap().index),
-			EraRewardPoints {
-				individual: vec![(11, 4), (21, 2)].into_iter().collect(),
-				total: 6,
-			},
+			EraRewardPoints { individual: vec![(11, 4), (21, 2)].into_iter().collect(), total: 6 },
 		);
 	})
 }
@@ -2074,7 +2120,7 @@ fn unbonded_balance_is_not_slashable() {
 		// total amount staked is slashable.
 		assert_eq!(Staking::slashable_balance_of(&11), 1000);
 
-		assert_ok!(Staking::unbond(Origin::signed(10),  800));
+		assert_ok!(Staking::unbond(Origin::signed(10), 800));
 
 		// only the active portion.
 		assert_eq!(Staking::slashable_balance_of(&11), 200);
@@ -2092,7 +2138,10 @@ fn era_is_always_same_length() {
 		assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era);
 
 		mock::start_active_era(2);
-		assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32);
+		assert_eq!(
+			Staking::eras_start_session_index(current_era()).unwrap(),
+			session_per_era * 2u32
+		);
 
 		let session = Session::current_index();
 		ForceEra::<Test>::put(Forcing::ForceNew);
@@ -2102,7 +2151,10 @@ fn era_is_always_same_length() {
 		assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2);
 
 		mock::start_active_era(4);
-		assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2u32 + session_per_era);
+		assert_eq!(
+			Staking::eras_start_session_index(current_era()).unwrap(),
+			session + 2u32 + session_per_era
+		);
 	});
 }
 
@@ -2111,10 +2163,7 @@ fn offence_forces_new_era() {
 	ExtBuilder::default().build_and_execute(|| {
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(5)],
@@ -2132,10 +2181,7 @@ fn offence_ensures_new_era_without_clobbering() {
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(5)],
@@ -2153,10 +2199,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() {
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(0)],
@@ -2182,14 +2225,7 @@ fn slashing_performed_according_exposure() {
 		// Handle an offence with a historical exposure.
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Exposure {
-						total: 500,
-						own: 500,
-						others: vec![],
-					},
-				),
+				offender: (11, Exposure { total: 500, own: 500, others: vec![] }),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(50)],
@@ -2210,10 +2246,7 @@ fn slash_in_old_span_does_not_deselect() {
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(0)],
@@ -2236,10 +2269,7 @@ fn slash_in_old_span_does_not_deselect() {
 
 		on_offence_in_era(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(0)],
@@ -2253,10 +2283,7 @@ fn slash_in_old_span_does_not_deselect() {
 
 		on_offence_in_era(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			// NOTE: A 100% slash here would clean up the account, causing de-registration.
@@ -2279,14 +2306,14 @@ fn reporters_receive_their_slice() {
 		// The reporters' reward is calculated from the total exposure.
 		let initial_balance = 1125;
 
-		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance);
+		assert_eq!(
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total,
+			initial_balance
+		);
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![1, 2],
 			}],
 			&[Perbill::from_percent(50)],
@@ -2309,14 +2336,14 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 		// The reporters' reward is calculated from the total exposure.
 		let initial_balance = 1125;
 
-		assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance);
+		assert_eq!(
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total,
+			initial_balance
+		);
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![1],
 			}],
 			&[Perbill::from_percent(20)],
@@ -2329,10 +2356,7 @@ fn subsequent_reports_in_same_span_pay_out_less() {
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![1],
 			}],
 			&[Perbill::from_percent(50)],
@@ -2357,8 +2381,8 @@ fn invulnerables_are_not_slashed() {
 		let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 21);
 		let initial_balance = Staking::slashable_balance_of(&21);
 
-		let nominator_balances: Vec<_> = exposure.others
-			.iter().map(|o| Balances::free_balance(&o.who)).collect();
+		let nominator_balances: Vec<_> =
+			exposure.others.iter().map(|o| Balances::free_balance(&o.who)).collect();
 
 		on_offence_now(
 			&[
@@ -2397,10 +2421,7 @@ fn dont_slash_if_fraction_is_zero() {
 
 		on_offence_now(
 			&[OffenceDetails {
-				offender: (
-					11,
-					Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
-				),
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
 				reporters: vec![],
 			}],
 			&[Perbill::from_percent(0)],
@@ -2420,12 +2441,10 @@ fn only_slash_for_max_in_era() {
 		assert_eq!(Balances::free_balance(11), 1000);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(50)],
 		);
 
@@ -2434,12 +2453,10 @@ fn only_slash_for_max_in_era() {
 		assert_eq!(Staking::force_era(), Forcing::ForceNew);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(25)],
 		);
 
@@ -2447,12 +2464,10 @@ fn only_slash_for_max_in_era() {
 		assert_eq!(Balances::free_balance(11), 500);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(60)],
 		);
 
@@ -2465,52 +2480,54 @@ fn only_slash_for_max_in_era() {
 fn garbage_collection_after_slashing() {
 	// ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping.
 	ExtBuilder::default()
-        .existential_deposit(2)
-        .min_nominator_bond(2)
-        .min_validator_bond(2)
-        .build_and_execute(|| {
-            assert_eq!(Balances::free_balance(11), 256_000);
+		.existential_deposit(2)
+		.min_nominator_bond(2)
+		.min_validator_bond(2)
+		.build_and_execute(|| {
+			assert_eq!(Balances::free_balance(11), 256_000);
 
-            on_offence_now(
-                &[
-                    OffenceDetails {
-                        offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-                        reporters: vec![],
-                    },
-                ],
-                &[Perbill::from_percent(10)],
-            );
+			on_offence_now(
+				&[OffenceDetails {
+					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+					reporters: vec![],
+				}],
+				&[Perbill::from_percent(10)],
+			);
 
-            assert_eq!(Balances::free_balance(11), 256_000 - 25_600);
-            assert!(<Staking as crate::Store>::SlashingSpans::get(&11).is_some());
-            assert_eq!(<Staking as crate::Store>::SpanSlash::get(&(11, 0)).amount_slashed(), &25_600);
+			assert_eq!(Balances::free_balance(11), 256_000 - 25_600);
+			assert!(<Staking as crate::Store>::SlashingSpans::get(&11).is_some());
+			assert_eq!(
+				<Staking as crate::Store>::SpanSlash::get(&(11, 0)).amount_slashed(),
+				&25_600
+			);
 
-            on_offence_now(
-                &[
-                    OffenceDetails {
-                        offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-                        reporters: vec![],
-                    },
-                ],
-                &[Perbill::from_percent(100)],
-            );
+			on_offence_now(
+				&[OffenceDetails {
+					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+					reporters: vec![],
+				}],
+				&[Perbill::from_percent(100)],
+			);
 
-            // validator and nominator slash in era are garbage-collected by era change,
-            // so we don't test those here.
+			// validator and nominator slash in era are garbage-collected by era change,
+			// so we don't test those here.
 
-            assert_eq!(Balances::free_balance(11), 2);
-            assert_eq!(Balances::total_balance(&11), 2);
+			assert_eq!(Balances::free_balance(11), 2);
+			assert_eq!(Balances::total_balance(&11), 2);
 
-            let slashing_spans = <Staking as crate::Store>::SlashingSpans::get(&11).unwrap();
-            assert_eq!(slashing_spans.iter().count(), 2);
+			let slashing_spans = <Staking as crate::Store>::SlashingSpans::get(&11).unwrap();
+			assert_eq!(slashing_spans.iter().count(), 2);
 
-            // reap_stash respects num_slashing_spans so that weight is accurate
-            assert_noop!(Staking::reap_stash(Origin::none(), 11, 0), Error::<Test>::IncorrectSlashingSpans);
-            assert_ok!(Staking::reap_stash(Origin::none(), 11, 2));
+			// reap_stash respects num_slashing_spans so that weight is accurate
+			assert_noop!(
+				Staking::reap_stash(Origin::none(), 11, 0),
+				Error::<Test>::IncorrectSlashingSpans
+			);
+			assert_ok!(Staking::reap_stash(Origin::none(), 11, 2));
 
-            assert!(<Staking as crate::Store>::SlashingSpans::get(&11).is_none());
-            assert_eq!(<Staking as crate::Store>::SpanSlash::get(&(11, 0)).amount_slashed(), &0);
-        })
+			assert!(<Staking as crate::Store>::SlashingSpans::get(&11).is_none());
+			assert_eq!(<Staking as crate::Store>::SpanSlash::get(&(11, 0)).amount_slashed(), &0);
+		})
 }
 
 #[test]
@@ -2527,13 +2544,8 @@ fn garbage_collection_on_window_pruning() {
 		assert_eq!(Balances::free_balance(101), 2000);
 		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(now, 11)),
-					reporters: vec![],
-				},
-			],
+		on_offence_now(
+			&[OffenceDetails { offender: (11, Staking::eras_stakers(now, 11)), reporters: vec![] }],
 			&[Perbill::from_percent(10)],
 		);
 
@@ -2574,12 +2586,10 @@ fn slashing_nominators_by_span_max() {
 		let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value;
 
 		on_offence_in_era(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(10)],
 			2,
 		);
@@ -2596,24 +2606,16 @@ fn slashing_nominators_by_span_max() {
 
 		let get_span = |account| <Staking as crate::Store>::SlashingSpans::get(&account).unwrap();
 
-		assert_eq!(
-			get_span(11).iter().collect::<Vec<_>>(),
-			expected_spans,
-		);
+		assert_eq!(get_span(11).iter().collect::<Vec<_>>(), expected_spans,);
 
-		assert_eq!(
-			get_span(101).iter().collect::<Vec<_>>(),
-			expected_spans,
-		);
+		assert_eq!(get_span(101).iter().collect::<Vec<_>>(), expected_spans,);
 
 		// second slash: higher era, higher value, same span.
 		on_offence_in_era(
-			&[
-				OffenceDetails {
-					offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(30)],
 			3,
 		);
@@ -2631,12 +2633,10 @@ fn slashing_nominators_by_span_max() {
 		// third slash: in same era and on same validator as first, higher
 		// in-era value, but lower slash value than slash 2.
 		on_offence_in_era(
-			&[
-				OffenceDetails {
-					offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
			&[Perbill::from_percent(20)],
 			2,
 		);
@@ -2667,12 +2667,10 @@ fn slashes_are_summed_across_spans() {
 		let get_span = |account| <Staking as crate::Store>::SlashingSpans::get(&account).unwrap();
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(10)],
 		);
 
@@ -2692,12 +2690,10 @@ fn slashes_are_summed_across_spans() {
 		assert_eq!(Staking::slashable_balance_of(&21), 900);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails {
+				offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(10)],
 		);
 
@@ -2714,84 +2710,68 @@ fn slashes_are_summed_across_spans() {
 
 #[test]
 fn deferred_slashes_are_deferred() {
-	ExtBuilder::default()
-		.slash_defer_duration(2)
-		.build_and_execute(|| {
-			mock::start_active_era(1);
+	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
+		mock::start_active_era(1);
 
-			assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(11), 1000);
 
-			let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
-			assert_eq!(Balances::free_balance(101), 2000);
-			let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
+		let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
+		assert_eq!(Balances::free_balance(101), 2000);
+		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-			on_offence_now(
-				&[
-					OffenceDetails {
-						offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
-						reporters: vec![],
-					},
-				],
+		on_offence_now(
+			&[OffenceDetails {
+				offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(10)],
 		);
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			mock::start_active_era(2);
+		mock::start_active_era(2);
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			mock::start_active_era(3);
+		mock::start_active_era(3);
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			// at the start of era 4, slashes from era 1 are processed,
-			// after being deferred for at least 2 full eras.
-			mock::start_active_era(4);
+		// at the start of era 4, slashes from era 1 are processed,
+		// after being deferred for at least 2 full eras.
+		mock::start_active_era(4);
 
-			assert_eq!(Balances::free_balance(11), 900);
-			assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10));
-		})
+		assert_eq!(Balances::free_balance(11), 900);
+		assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10));
+	})
 }
 
 #[test]
 fn remove_deferred() {
-	ExtBuilder::default()
-		.slash_defer_duration(2)
-		.build_and_execute(|| {
-			mock::start_active_era(1);
+	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
+		mock::start_active_era(1);
 
-			assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(11), 1000);
 
-			let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
-			assert_eq!(Balances::free_balance(101), 2000);
-			let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
+		let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
+		assert_eq!(Balances::free_balance(101), 2000);
+		let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value;
 
-			on_offence_now(
-				&[
-					OffenceDetails {
-						offender: (11, exposure.clone()),
-						reporters: vec![],
-					},
-				],
+		on_offence_now(
+			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(10)],
 		);
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			mock::start_active_era(2);
+		mock::start_active_era(2);
 
-			on_offence_in_era(
-				&[
-					OffenceDetails {
-						offender: (11, exposure.clone()),
-						reporters: vec![],
-					},
-				],
+		on_offence_in_era(
+			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(15)],
 			1,
 		);
@@ -2802,32 +2782,32 @@ fn remove_deferred() {
 			Error::<Test>::EmptyTargets
 		);
 
-		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0]));
+		assert_ok!(Staking::cancel_deferred_slash(Origin::root(), 1, vec![0]));
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			mock::start_active_era(3);
+		mock::start_active_era(3);
 
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			// at the start of era 4, slashes from era 1 are processed,
-			// after being deferred for at least 2 full eras.
-			mock::start_active_era(4);
+		// at the start of era 4, slashes from era 1 are processed,
+		// after being deferred for at least 2 full eras.
+		mock::start_active_era(4);
 
-			// the first slash for 10% was cancelled, so no effect.
-			assert_eq!(Balances::free_balance(11), 1000);
-			assert_eq!(Balances::free_balance(101), 2000);
+		// the first slash for 10% was cancelled, so no effect.
+		assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			mock::start_active_era(5);
+		mock::start_active_era(5);
 
-			let slash_10 = Perbill::from_percent(10);
-			let slash_15 = Perbill::from_percent(15);
-			let initial_slash = slash_10 * nominated_value;
+		let slash_10 = Perbill::from_percent(10);
+		let slash_15 = Perbill::from_percent(15);
+		let initial_slash = slash_10 * nominated_value;
 
-			let total_slash = slash_15 * nominated_value;
-			let actual_slash = total_slash - initial_slash;
+		let total_slash = slash_15 * nominated_value;
+		let actual_slash = total_slash - initial_slash;
 
 		// 5% slash (15 - 10) processed now.
 		assert_eq!(Balances::free_balance(11), 950);
@@ -2837,63 +2817,39 @@ fn remove_multi_deferred() {
 
 #[test]
 fn remove_multi_deferred() {
-	ExtBuilder::default()
-		.slash_defer_duration(2)
-		.build_and_execute(|| {
-			mock::start_active_era(1);
+	ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| {
+		mock::start_active_era(1);
 
-			assert_eq!(Balances::free_balance(11), 1000);
+		assert_eq!(Balances::free_balance(11), 1000);
 
-			let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
-			assert_eq!(Balances::free_balance(101), 2000);
+		let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11);
+		assert_eq!(Balances::free_balance(101), 2000);
 
-			on_offence_now(
-				&[
-					OffenceDetails {
-						offender: (11, exposure.clone()),
-						reporters: vec![],
-					},
-				],
+		on_offence_now(
			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(10)],
 		);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
-					reporters: vec![],
-				}
-			],
+			&[OffenceDetails {
+				offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)),
+				reporters: vec![],
+			}],
 			&[Perbill::from_percent(10)],
 		);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, exposure.clone()),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(25)],
 		);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (42, exposure.clone()),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails { offender: (42, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(25)],
 		);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (69, exposure.clone()),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails { offender: (69, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(25)],
 		);
 
@@ -2942,20 +2898,14 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 		assert_eq!(exposure_21.total, 1000 + 375);
 
 		on_offence_now(
-			&[OffenceDetails {
-				offender: (11, exposure_11.clone()),
-				reporters: vec![],
-			}],
+			&[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(10)],
 		);
 
 		// post-slash balance
 		let nominator_slash_amount_11 = 125 / 10;
 		assert_eq!(Balances::free_balance(11), 900);
-		assert_eq!(
-			Balances::free_balance(101),
-			2000 - nominator_slash_amount_11
-		);
+		assert_eq!(Balances::free_balance(101), 2000 - nominator_slash_amount_11);
 
 		// This is the best way to check that the validator was chilled; `get` will
 		// return default value.
@@ -2967,9 +2917,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid
 
 		// and make sure that the vote will be ignored even if the validator
 		// re-registers.
-		let last_slash = <Staking as crate::Store>::SlashingSpans::get(&11)
-			.unwrap()
-			.last_nonzero_slash();
+		let last_slash = <Staking as crate::Store>::SlashingSpans::get(&11).unwrap().last_nonzero_slash();
 		assert!(nominations.submitted_in < last_slash);
 
 		// actually re-bond the slashed validator
@@ -3082,12 +3030,7 @@ fn zero_slash_keeps_nominators() {
 		assert_eq!(Balances::free_balance(101), 2000);
 
 		on_offence_now(
-			&[
-				OffenceDetails {
-					offender: (11, exposure.clone()),
-					reporters: vec![],
-				},
-			],
+			&[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }],
 			&[Perbill::from_percent(0)],
 		);
 
@@ -3120,10 +3063,16 @@ fn six_session_delay() {
 
 		// pallet-session is delaying session by one, thus the next session to plan is +2.
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 2), None);
-		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 3), Some(val_set.clone()));
+		assert_eq!(
+			<Staking as SessionManager<_>>::new_session(init_session + 3),
+			Some(val_set.clone())
+		);
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 4), None);
 		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 5), None);
-		assert_eq!(<Staking as SessionManager<_>>::new_session(init_session + 6), Some(val_set.clone()));
+		assert_eq!(
+			<Staking as SessionManager<_>>::new_session(init_session + 6),
+			Some(val_set.clone())
+		);
 
 		<Staking as SessionManager<_>>::end_session(init_session);
 		<Staking as SessionManager<_>>::start_session(init_session + 1);
@@ -3171,14 +3120,12 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward(
 			let controller = 20_000 + i as AccountId;
 			let balance = 10_000 + i as Balance;
 			Balances::make_free_balance_be(&stash, balance);
-			assert_ok!(
-				Staking::bond(
-					Origin::signed(stash),
-					controller,
-					balance,
-					RewardDestination::Stash
-				)
-			);
+			assert_ok!(Staking::bond(
+				Origin::signed(stash),
+				controller,
+				balance,
+				RewardDestination::Stash
+			));
 			assert_ok!(Staking::nominate(Origin::signed(controller), vec![11]));
 		}
 		mock::start_active_era(1);
@@ -3259,7 +3206,13 @@ fn test_payout_stakers() {
 		// We track rewards in `claimed_rewards` vec
 		assert_eq!(
 			Staking::ledger(&10),
-			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![1] })
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![1]
+			})
 		);
 
 		for i in 3..16 {
@@ -3275,7 +3228,13 @@ fn test_payout_stakers() {
 		// We track rewards in `claimed_rewards` vec
 		assert_eq!(
 			Staking::ledger(&10),
-			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: (1..=14).collect() })
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: (1..=14).collect()
+			})
 		);
 
 		for i in 16..100 {
@@ -3290,7 +3249,13 @@ fn test_payout_stakers() {
 		assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98));
 		assert_eq!(
 			Staking::ledger(&10),
-			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 98] })
+			Some(StakingLedger {
+				stash: 11,
+				total: 1000,
+				active: 1000,
+				unlocking: vec![],
+				claimed_rewards: vec![15, 98]
+			})
 		);
 
 		// Out of order claims works.
@@ -3299,7 +3264,13 @@ fn test_payout_stakers() { assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 23, 42, 69, 98] }) + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![15, 23, 42, 69, 98] + }) ); }); } @@ -3383,10 +3354,10 @@ fn payout_stakers_handles_weight_refund() { assert!(half_max_nom_rewarded > 0); assert!(max_nom_rewarded > half_max_nom_rewarded); - let max_nom_rewarded_weight - = ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); - let half_max_nom_rewarded_weight - = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); + let max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); + let half_max_nom_rewarded_weight = + ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); assert!(zero_nom_payouts_weight > 0); assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); @@ -3395,7 +3366,7 @@ fn payout_stakers_handles_weight_refund() { let balance = 1000; bond_validator(11, 10, balance); - /* Era 1 */ + // Era 1 start_active_era(1); // Reward just the validator. @@ -3407,7 +3378,7 @@ fn payout_stakers_handles_weight_refund() { bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); } - /* Era 2 */ + // Era 2 start_active_era(2); // Collect payouts when there are no nominators @@ -3415,14 +3386,11 @@ fn payout_stakers_handles_weight_refund() { let info = call.get_dispatch_info(); let result = call.dispatch(Origin::signed(20)); assert_ok!(result); - assert_eq!( - extract_actual_weight(&result, &info), - zero_nom_payouts_weight - ); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); // The validator is not rewarded in this era; so there will be zero payouts to claim for this era. - /* Era 3 */ + // Era 3 start_active_era(3); // Collect payouts for an era where the validator did not receive any points. @@ -3435,7 +3403,7 @@ fn payout_stakers_handles_weight_refund() { // Reward the validator and its nominators. Staking::reward_by_ids(vec![(11, 1)]); - /* Era 4 */ + // Era 4 start_active_era(4); // Collect payouts when the validator has `half_max_nom_rewarded` nominators. @@ -3451,14 +3419,14 @@ fn payout_stakers_handles_weight_refund() { bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); } - /* Era 5 */ + // Era 5 start_active_era(5); // We now have `max_nom_rewarded` nominators actively nominating our validator. // Reward the validator so we can collect for everyone in the next era. Staking::reward_by_ids(vec![(11, 1)]); - /* Era 6 */ + // Era 6 start_active_era(6); // Collect payouts when the validator had `half_max_nom_rewarded` nominators. 
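// A minimal sketch of the weight-refund pattern that `extract_actual_weight`
// verifies in the test above: a call is charged its worst-case weight before
// dispatch and reports the weight it actually consumed in `PostDispatchInfo`,
// so the overestimate is refunded. `BASE_WEIGHT`, `WEIGHT_PER_NOMINATOR` and
// `payout_sketch` are assumed names for illustration, not pallet items.
use frame_support::{
    dispatch::{DispatchResultWithPostInfo, PostDispatchInfo},
    weights::Weight,
};

const BASE_WEIGHT: Weight = 10_000;
const WEIGHT_PER_NOMINATOR: Weight = 1_000;

fn payout_sketch(nominators_paid: u64) -> DispatchResultWithPostInfo {
    // ... pay out `nominators_paid` nominators here ...
    // Report the weight actually used; the difference from the pre-dispatch
    // (worst-case) estimate is refunded to the caller.
    Ok(PostDispatchInfo {
        actual_weight: Some(BASE_WEIGHT + nominators_paid * WEIGHT_PER_NOMINATOR),
        pays_fee: Default::default(),
    })
}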
@@ -3665,7 +3633,6 @@ fn session_buffering_with_offset() { assert_eq!(current_era(), 2); assert_eq!(active_era(), 2); assert_eq!(Session::current_index(), 10); - }); } @@ -3717,7 +3684,6 @@ fn session_buffering_no_offset() { assert_eq!(current_era(), 2); assert_eq!(active_era(), 2); assert_eq!(Session::current_index(), 10); - }); } @@ -3758,10 +3724,7 @@ fn cannot_rebond_to_lower_than_ed() { ); // now bond a wee bit more - assert_noop!( - Staking::rebond(Origin::signed(20), 5), - Error::::InsufficientBond, - ); + assert_noop!(Staking::rebond(Origin::signed(20), 5), Error::::InsufficientBond,); }) } @@ -3796,10 +3759,7 @@ fn cannot_bond_extra_to_lower_than_ed() { stash: 21, total: 1000, active: 0, - unlocking: vec![UnlockChunk { - value: 1000, - era: 3 - }], + unlocking: vec![UnlockChunk { value: 1000, era: 3 }], claimed_rewards: vec![] } ); @@ -3866,8 +3826,8 @@ mod election_data_provider { #[test] fn targets_2sec_block() { let mut validators = 1000; - while ::WeightInfo::get_npos_targets(validators) - < 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + while ::WeightInfo::get_npos_targets(validators) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND { validators += 1; } @@ -3884,8 +3844,8 @@ mod election_data_provider { let slashing_spans = validators; let mut nominators = 1000; - while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) - < 2 * frame_support::weights::constants::WEIGHT_PER_SECOND + while ::WeightInfo::get_npos_voters(validators, nominators, slashing_spans) < + 2 * frame_support::weights::constants::WEIGHT_PER_SECOND { nominators += 1; } @@ -3975,10 +3935,7 @@ mod election_data_provider { run_to_block(20); assert_eq!(Staking::next_election_prediction(System::block_number()), 45); assert_eq!(staking_events().len(), 1); - assert_eq!( - *staking_events().last().unwrap(), - Event::StakingElection - ); + assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); for b in 21..45 { run_to_block(b); @@ -3989,10 +3946,7 @@ mod election_data_provider { run_to_block(45); assert_eq!(Staking::next_election_prediction(System::block_number()), 70); assert_eq!(staking_events().len(), 3); - assert_eq!( - *staking_events().last().unwrap(), - Event::StakingElection - ); + assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); Staking::force_no_eras(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX); @@ -4015,10 +3969,7 @@ mod election_data_provider { run_to_block(55); assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25); assert_eq!(staking_events().len(), 6); - assert_eq!( - *staking_events().last().unwrap(), - Event::StakingElection - ); + assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); // The new era has been planned, forcing is changed from `ForceNew` to `NotForcing`. assert_eq!(ForceEra::::get(), Forcing::NotForcing); }) @@ -4032,11 +3983,14 @@ mod election_data_provider { // not keep track of the count. This test should panic as we verify the count is accurate // after every test using the `post_checks` in `mock`. 
Validators::<Test>::insert(987654321, ValidatorPrefs::default()); - Nominators::<Test>::insert(987654321, Nominations { - targets: vec![], - submitted_in: Default::default(), - suppressed: false, - }); + Nominators::<Test>::insert( + 987654321, + Nominations { + targets: vec![], + submitted_in: Default::default(), + suppressed: false, + }, + ); }) } @@ -4049,7 +4003,10 @@ mod election_data_provider { .build_and_execute(|| { // 500 is not enough for any role assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); - assert_noop!(Staking::nominate(Origin::signed(4), vec![1]), Error::<Test>::InsufficientBond); + assert_noop!( + Staking::nominate(Origin::signed(4), vec![1]), + Error::<Test>::InsufficientBond + ); assert_noop!( Staking::validate(Origin::signed(4), ValidatorPrefs::default()), Error::<Test>::InsufficientBond, ); @@ -4069,12 +4026,18 @@ mod election_data_provider { assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); // Can't unbond anything as validator - assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::<Test>::InsufficientBond); + assert_noop!( + Staking::unbond(Origin::signed(4), 500), + Error::<Test>::InsufficientBond + ); // Once they are a nominator, they can unbond 500 assert_ok!(Staking::nominate(Origin::signed(4), vec![1])); assert_ok!(Staking::unbond(Origin::signed(4), 500)); - assert_noop!(Staking::unbond(Origin::signed(4), 500), Error::<Test>::InsufficientBond); + assert_noop!( + Staking::unbond(Origin::signed(4), 500), + Error::<Test>::InsufficientBond + ); // Once they are chilled they can unbond everything assert_ok!(Staking::chill(Origin::signed(4))); @@ -4089,7 +4052,7 @@ mod election_data_provider { .min_nominator_bond(1_000) .min_validator_bond(1_500) .build_and_execute(|| { - for i in 0 .. 15 { + for i in 0..15 { let a = 4 * i; let b = 4 * i + 1; let c = 4 * i + 2; @@ -4100,11 +4063,21 @@ mod election_data_provider { Balances::make_free_balance_be(&d, 100_000); // Nominator - assert_ok!(Staking::bond(Origin::signed(a), b, 1000, RewardDestination::Controller)); + assert_ok!(Staking::bond( + Origin::signed(a), + b, + 1000, + RewardDestination::Controller + )); assert_ok!(Staking::nominate(Origin::signed(b), vec![1])); // Validator - assert_ok!(Staking::bond(Origin::signed(c), d, 1500, RewardDestination::Controller)); + assert_ok!(Staking::bond( + Origin::signed(c), + d, + 1500, + RewardDestination::Controller + )); assert_ok!(Staking::validate(Origin::signed(d), ValidatorPrefs::default())); } @@ -4117,35 +4090,83 @@ mod election_data_provider { // `chill_other` to succeed from one user to another. // Can't chill these users - assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::<Test>::CannotChillOther); - assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::<Test>::CannotChillOther); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::<Test>::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::<Test>::CannotChillOther + ); // Change the minimum bond... but no limits.
- assert_ok!(Staking::set_staking_limits(Origin::root(), 1_500, 2_000, None, None, None)); + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 1_500, + 2_000, + None, + None, + None + )); // Still can't chill these users - assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::<Test>::CannotChillOther); - assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::<Test>::CannotChillOther); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::<Test>::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::<Test>::CannotChillOther + ); // Add limits, but no threshold - assert_ok!(Staking::set_staking_limits(Origin::root(), 1_500, 2_000, Some(10), Some(10), None)); + assert_ok!(Staking::set_staking_limits( + Origin::root(), + 1_500, + 2_000, + Some(10), + Some(10), + None + )); // Still can't chill these users - assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::<Test>::CannotChillOther); - assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::<Test>::CannotChillOther); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::<Test>::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::<Test>::CannotChillOther + ); // Add threshold, but no limits assert_ok!(Staking::set_staking_limits( - Origin::root(), 1_500, 2_000, None, None, Some(Percent::from_percent(0)) + Origin::root(), + 1_500, + 2_000, + None, + None, + Some(Percent::from_percent(0)) )); // Still can't chill these users - assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::<Test>::CannotChillOther); - assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::<Test>::CannotChillOther); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::<Test>::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::<Test>::CannotChillOther + ); // Add threshold and limits assert_ok!(Staking::set_staking_limits( - Origin::root(), 1_500, 2_000, Some(10), Some(10), Some(Percent::from_percent(75)) + Origin::root(), + 1_500, + 2_000, + Some(10), + Some(10), + Some(Percent::from_percent(75)) )); // 16 people total because tests start with 1 active one @@ -4153,7 +4174,7 @@ mod election_data_provider { assert_eq!(CounterForValidators::<Test>::get(), 16); // Users can now be chilled down to 7 people, so we try to remove 9 of them (starting with 16) - for i in 6 .. 15 { + for i in 6..15 { let b = 4 * i + 1; let d = 4 * i + 3; assert_ok!(Staking::chill_other(Origin::signed(1337), b)); @@ -4161,8 +4182,14 @@ } // Cant go lower. - assert_noop!(Staking::chill_other(Origin::signed(1337), 1), Error::<Test>::CannotChillOther); - assert_noop!(Staking::chill_other(Origin::signed(1337), 3), Error::<Test>::CannotChillOther); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 1), + Error::<Test>::CannotChillOther + ); + assert_noop!( + Staking::chill_other(Origin::signed(1337), 3), + Error::<Test>::CannotChillOther + ); }) } @@ -4177,23 +4204,37 @@ mod election_data_provider { // Change the maximums let max = 10; assert_ok!(Staking::set_staking_limits( - Origin::root(), 10, 10, Some(max), Some(max), Some(Percent::from_percent(0)) + Origin::root(), + 10, + 10, + Some(max), + Some(max), + Some(Percent::from_percent(0)) )); // can create `max - validator_count` validators let mut some_existing_validator = AccountId::default(); - for i in 0 .. 
max - validator_count { + for i in 0..max - validator_count { let (_, controller) = testing_utils::create_stash_controller::<Test>( - i + 10_000_000, 100, RewardDestination::Controller, - ).unwrap(); - assert_ok!(Staking::validate(Origin::signed(controller), ValidatorPrefs::default())); + i + 10_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); + assert_ok!(Staking::validate( + Origin::signed(controller), + ValidatorPrefs::default() + )); some_existing_validator = controller; } // but no more let (_, last_validator) = testing_utils::create_stash_controller::<Test>( - 1337, 100, RewardDestination::Controller, - ).unwrap(); + 1337, + 100, + RewardDestination::Controller, + ) + .unwrap(); assert_noop!( Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default()), @@ -4202,29 +4243,44 @@ mod election_data_provider { // same with nominators let mut some_existing_nominator = AccountId::default(); - for i in 0 .. max - nominator_count { + for i in 0..max - nominator_count { let (_, controller) = testing_utils::create_stash_controller::<Test>( - i + 20_000_000, 100, RewardDestination::Controller, - ).unwrap(); + i + 20_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); assert_ok!(Staking::nominate(Origin::signed(controller), vec![1])); some_existing_nominator = controller; } // one more is too many let (_, last_nominator) = testing_utils::create_stash_controller::<Test>( - 30_000_000, 100, RewardDestination::Controller, - ).unwrap(); - assert_noop!(Staking::nominate(Origin::signed(last_nominator), vec![1]), Error::<Test>::TooManyNominators); + 30_000_000, + 100, + RewardDestination::Controller, + ) + .unwrap(); + assert_noop!( + Staking::nominate(Origin::signed(last_nominator), vec![1]), + Error::<Test>::TooManyNominators + ); // Re-nominate works fine assert_ok!(Staking::nominate(Origin::signed(some_existing_nominator), vec![1])); // Re-validate works fine - assert_ok!(Staking::validate(Origin::signed(some_existing_validator), ValidatorPrefs::default())); + assert_ok!(Staking::validate( + Origin::signed(some_existing_validator), + ValidatorPrefs::default() + )); // No problem when we set to `None` again assert_ok!(Staking::set_staking_limits(Origin::root(), 10, 10, None, None, None)); assert_ok!(Staking::nominate(Origin::signed(last_nominator), vec![1])); - assert_ok!(Staking::validate(Origin::signed(last_validator), ValidatorPrefs::default())); + assert_ok!(Staking::validate( + Origin::signed(last_validator), + ValidatorPrefs::default() + )); }) } } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index cf14e8b22362f4c95211b2d96f4e9db3529f9eaa..cba4e68b5f61eee963a369027d23fafaf426134b 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -36,6 +36,7 @@ // --template=./.maintain/frame-weight-template.hbs +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] diff --git a/substrate/frame/sudo/src/lib.rs b/substrate/frame/sudo/src/lib.rs index 6f70ddda99f64ce36223c84743a9fcbb7c2834be..7f0f6f57bf42f003caf314ce4c090b9873250ad1 100644 --- a/substrate/frame/sudo/src/lib.rs +++ b/substrate/frame/sudo/src/lib.rs @@ -93,13 +93,10 @@ #![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime::{traits::StaticLookup, DispatchResult}; use sp_std::prelude::*; -use sp_runtime::{DispatchResult, traits::StaticLookup}; -use frame_support::{ - weights::GetDispatchInfo, - traits::UnfilteredDispatchable, -}; +use frame_support::{traits::UnfilteredDispatchable, 
weights::GetDispatchInfo}; #[cfg(test)] mod mock; @@ -110,9 +107,9 @@ pub use pallet::*; #[frame_support::pallet] pub mod pallet { + use super::{DispatchResult, *}; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::{*, DispatchResult}; #[pallet::config] pub trait Config: frame_system::Config { @@ -120,7 +117,7 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// A sudo-able call. - type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; } #[pallet::pallet] @@ -233,7 +230,7 @@ pub mod pallet { pub fn sudo_as( origin: OriginFor, who: ::Source, - call: Box<::Call> + call: Box<::Call>, ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -282,9 +279,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { - key: Default::default(), - } + Self { key: Default::default() } } } diff --git a/substrate/frame/sudo/src/mock.rs b/substrate/frame/sudo/src/mock.rs index 92683f98fb64f79dbc1c8d014a6c4dc707ef95f4..4fa24dd56ce59013a61a2a090000e2d065e96f96 100644 --- a/substrate/frame/sudo/src/mock.rs +++ b/substrate/frame/sudo/src/mock.rs @@ -18,20 +18,25 @@ //! Test utilities use super::*; -use frame_support::{parameter_types, traits::GenesisBuild}; -use sp_core::H256; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use sp_io; use crate as sudo; -use frame_support::traits::Filter; +use frame_support::{ + parameter_types, + traits::{Filter, GenesisBuild}, +}; use frame_system::limits; +use sp_core::H256; +use sp_io; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; // Logger module to track execution. #[frame_support::pallet] pub mod logger { + use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; - use super::*; #[pallet::config] pub trait Config: frame_system::Config { @@ -48,7 +53,7 @@ pub mod logger { pub fn privileged_i32_log( origin: OriginFor, i: i32, - weight: Weight + weight: Weight, ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is `Root`. ensure_root(origin)?; @@ -61,7 +66,7 @@ pub mod logger { pub fn non_privileged_log( origin: OriginFor, i: i32, - weight: Weight + weight: Weight, ) -> DispatchResultWithPostInfo { // Ensure that the `origin` is some signed account. let sender = ensure_signed(origin)?; @@ -82,22 +87,13 @@ pub mod logger { #[pallet::storage] #[pallet::getter(fn account_log)] - pub(super) type AccountLog = StorageValue< - _, - Vec, - ValueQuery - >; + pub(super) type AccountLog = StorageValue<_, Vec, ValueQuery>; #[pallet::storage] #[pallet::getter(fn i32_log)] - pub(super) type I32Log = StorageValue< - _, - Vec, - ValueQuery - >; + pub(super) type I32Log = StorageValue<_, Vec, ValueQuery>; } - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -169,8 +165,8 @@ pub type LoggerCall = logger::Call; // Build test environment by setting the root `key` for the Genesis. 
pub fn new_test_ext(root_key: u64) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap(); - sudo::GenesisConfig::<Test>{ - key: root_key, - }.assimilate_storage(&mut t).unwrap(); + sudo::GenesisConfig::<Test> { key: root_key } + .assimilate_storage(&mut t) + .unwrap(); t.into() } diff --git a/substrate/frame/sudo/src/tests.rs b/substrate/frame/sudo/src/tests.rs index aa859c547c039766be1a547435d2d8cca4c8b842..9437f20832c445521b56a9046f0c50a897c6ffea 100644 --- a/substrate/frame/sudo/src/tests.rs +++ b/substrate/frame/sudo/src/tests.rs @@ -18,17 +18,17 @@ //! Tests for the module. use super::*; +use frame_support::{assert_noop, assert_ok}; use mock::{ - Sudo, SudoCall, Origin, Call, Test, new_test_ext, LoggerCall, Logger, System, - Event as TestEvent, + new_test_ext, Call, Event as TestEvent, Logger, LoggerCall, Origin, Sudo, SudoCall, System, + Test, }; -use frame_support::{assert_ok, assert_noop}; #[test] fn test_setup_works() { // Environment setup, logger storage, and sudo `key` retrieval should work as expected. new_test_ext(1).execute_with(|| { - assert_eq!(Sudo::key(), 1u64); + assert_eq!(Sudo::key(), 1u64); assert!(Logger::i32_log().is_empty()); assert!(Logger::account_log().is_empty()); }); @@ -105,7 +105,7 @@ fn set_key_basics() { new_test_ext(1).execute_with(|| { // A root `key` can change the root `key` assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - assert_eq!(Sudo::key(), 2u64); + assert_eq!(Sudo::key(), 2u64); }); new_test_ext(1).execute_with(|| { @@ -146,14 +146,14 @@ fn sudo_as_basics() { let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); assert_eq!(Logger::i32_log(), vec![42i32]); - // The correct user makes the call within `sudo_as`. + // The correct user makes the call within `sudo_as`. assert_eq!(Logger::account_log(), vec![2]); }); } #[test] fn sudo_as_emits_events_correctly() { - new_test_ext(1).execute_with(|| { + new_test_ext(1).execute_with(|| { // Set block number to 1 because events are not emitted on block 0.
System::set_block_number(1); diff --git a/substrate/frame/support/procedural/src/clone_no_bound.rs b/substrate/frame/support/procedural/src/clone_no_bound.rs index 1911fdfd9fb29484f3d08c92ee31e327cbd6069a..747900fd023f66d4a93eda6846e7ffd45eab4538 100644 --- a/substrate/frame/support/procedural/src/clone_no_bound.rs +++ b/substrate/frame/support/procedural/src/clone_no_bound.rs @@ -30,56 +30,61 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => #i: core::clone::Clone::clone(&self.#i) - )); + ) + }); quote::quote!( Self { #( #fields, )* } ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() - .map(|(i, _)| syn::Index::from(i)) - .map(|i| quote::quote_spanned!(i.span() => - core::clone::Clone::clone(&self.#i) - )); + let fields = + unnamed.unnamed.iter().enumerate().map(|(i, _)| syn::Index::from(i)).map(|i| { + quote::quote_spanned!(i.span() => + core::clone::Clone::clone(&self.#i) + ) + }); quote::quote!( Self ( #( #fields, )* ) ) }, syn::Fields::Unit => { - quote::quote!( Self ) - } + quote::quote!(Self) + }, }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { - let ident = &variant.ident; - match &variant.fields { - syn::Fields::Named(named) => { - let captured = named.named.iter().map(|i| &i.ident); - let cloned = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - #i: core::clone::Clone::clone(#i) - )); - quote::quote!( - Self::#ident { #( ref #captured, )* } => Self::#ident { #( #cloned, )*} + let variants = enum_.variants.iter().map(|variant| { + let ident = &variant.ident; + match &variant.fields { + syn::Fields::Named(named) => { + let captured = named.named.iter().map(|i| &i.ident); + let cloned = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + #i: core::clone::Clone::clone(#i) ) - }, - syn::Fields::Unnamed(unnamed) => { - let captured = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let cloned = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - core::clone::Clone::clone(#i) - )); - quote::quote!( - Self::#ident ( #( ref #captured, )* ) => Self::#ident ( #( #cloned, )*) + }); + quote::quote!( + Self::#ident { #( ref #captured, )* } => Self::#ident { #( #cloned, )*} + ) + }, + syn::Fields::Unnamed(unnamed) => { + let captured = unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); + let cloned = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + core::clone::Clone::clone(#i) ) - }, - syn::Fields::Unit => quote::quote!( Self::#ident => Self::#ident ), - } - }); + }); + quote::quote!( + Self::#ident ( #( ref #captured, )* ) => Self::#ident ( #( #cloned, )*) + ) + }, + syn::Fields::Unit => quote::quote!( Self::#ident => Self::#ident ), + } + }); quote::quote!(match self { #( #variants, )* @@ -99,5 +104,6 @@ pub fn derive_clone_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke } } }; - ).into() + ) + .into() } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs index 
6a44468f25b2c4fbd02e6cd4ff50c2d6901ba1c5..f847bc6dbfbdd70fb6355b3a3464ced8ce77ddfc 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/call.rs @@ -30,16 +30,16 @@ pub fn expand_outer_dispatch( let mut query_call_part_macros = Vec::new(); let mut pallet_names = Vec::new(); - let pallets_with_call = pallet_decls - .iter() - .filter(|decl| decl.exists_part("Call")); + let pallets_with_call = pallet_decls.iter().filter(|decl| decl.exists_part("Call")); for pallet_declaration in pallets_with_call { let name = &pallet_declaration.name; let path = &pallet_declaration.path; let index = pallet_declaration.index; - variant_defs.extend(quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),)); + variant_defs.extend( + quote!(#[codec(index = #index)] #name( #scrate::dispatch::CallableCallFor<#name, #runtime> ),), + ); variant_patterns.push(quote!(Call::#name(call))); pallet_names.push(name); query_call_part_macros.push(quote! { diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs index 8dc2710b192d16c65240478e3d6726c99485ae28..5e1b9d94700e679da63d6f461bd9d8e6ffb4e59e 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/config.rs @@ -18,7 +18,7 @@ use crate::construct_runtime::Pallet; use inflector::Inflector; use proc_macro2::TokenStream; -use quote::{ToTokens, format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; use syn::Ident; pub fn expand_outer_config( @@ -37,15 +37,18 @@ pub fn expand_outer_config( let pallet_name = &decl.name; let path_str = path.into_token_stream().to_string(); let config = format_ident!("{}Config", pallet_name); - let field_name = &Ident::new( - &pallet_name.to_string().to_snake_case(), - decl.name.span(), - ); + let field_name = + &Ident::new(&pallet_name.to_string().to_snake_case(), decl.name.span()); let part_is_generic = !pallet_entry.generics.params.is_empty(); types.extend(expand_config_types(runtime, decl, &config, part_is_generic)); fields.extend(quote!(pub #field_name: #config,)); - build_storage_calls.extend(expand_config_build_storage_call(scrate, runtime, decl, &field_name)); + build_storage_calls.extend(expand_config_build_storage_call( + scrate, + runtime, + decl, + &field_name, + )); query_genesis_config_part_macros.push(quote! { #path::__substrate_genesis_config_check::is_genesis_config_defined!(#pallet_name); #[cfg(feature = "std")] @@ -97,15 +100,15 @@ fn expand_config_types( let path = &decl.path; match (decl.instance.as_ref(), part_is_generic) { - (Some(inst), true) => quote!{ + (Some(inst), true) => quote! { #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime, #path::#inst>; }, - (None, true) => quote!{ + (None, true) => quote! { #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig<#runtime>; }, - (_, false) => quote!{ + (_, false) => quote! { #[cfg(any(feature = "std", test))] pub type #config = #path::GenesisConfig; }, @@ -125,7 +128,7 @@ fn expand_config_build_storage_call( quote!(#path::__InherentHiddenInstance) }; - quote!{ + quote! 
{ #scrate::sp_runtime::BuildModuleGenesisStorage:: <#runtime, #instance>::build_module_genesis_storage(&self.#field_name, storage)?; } diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/event.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/event.rs index d304a30b7df0142983d9db260b81848d6711ce68..a04759ec972b68df097bdcc25d298d8de7366cc4 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/event.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/event.rs @@ -43,7 +43,7 @@ pub fn expand_outer_event( be constructed: pallet `{}` must have generic `Event`", pallet_name, ); - return Err(syn::Error::new(pallet_name.span(), msg)); + return Err(syn::Error::new(pallet_name.span(), msg)) } let part_is_generic = !generics.params.is_empty(); @@ -54,7 +54,13 @@ pub fn expand_outer_event( (None, false) => quote!(#path::Event), }; - event_variants.extend(expand_event_variant(runtime, pallet_decl, index, instance, generics)); + event_variants.extend(expand_event_variant( + runtime, + pallet_decl, + index, + instance, + generics, + )); event_conversions.extend(expand_event_conversion(scrate, pallet_decl, &pallet_event)); query_event_part_macros.push(quote! { #path::__substrate_event_check::is_event_part_defined!(#pallet_name); @@ -94,16 +100,16 @@ fn expand_event_variant( match instance { Some(inst) if part_is_generic => { quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime, #path::#inst>),) - } + }, Some(inst) => { quote!(#[codec(index = #index)] #variant_name(#path::Event<#path::#inst>),) - } + }, None if part_is_generic => { quote!(#[codec(index = #index)] #variant_name(#path::Event<#runtime>),) - } + }, None => { quote!(#[codec(index = #index)] #variant_name(#path::Event),) - } + }, } } @@ -114,7 +120,7 @@ fn expand_event_conversion( ) -> TokenStream { let variant_name = &pallet.name; - quote!{ + quote! { impl From<#pallet_event> for Event { fn from(x: #pallet_event) -> Self { Event::#variant_name(x) diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index 5854d0edccabb35d760699d29cc40b0ea1648e59..fa12242f4fcd651be83fe644dde1c330326881fe 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License -use proc_macro2::TokenStream; use crate::construct_runtime::Pallet; -use syn::{Ident, TypePath}; +use proc_macro2::TokenStream; use quote::quote; +use syn::{Ident, TypePath}; pub fn expand_runtime_metadata( runtime: &Ident, @@ -48,7 +48,7 @@ pub fn expand_runtime_metadata( let constants = expand_pallet_metadata_constants(runtime, scrate, decl); let errors = expand_pallet_metadata_errors(runtime, scrate, decl); - quote!{ + quote! { #scrate::metadata::ModuleMetadata { name: #scrate::metadata::DecodeDifferent::Encode(stringify!(#name)), index: #index, @@ -62,7 +62,7 @@ pub fn expand_runtime_metadata( }) .collect::>(); - quote!{ + quote! { impl #runtime { pub fn metadata() -> #scrate::metadata::RuntimeMetadataPrefixed { #scrate::metadata::RuntimeMetadataLastVersion { @@ -94,7 +94,7 @@ fn expand_pallet_metadata_storage( let instance = decl.instance.as_ref().into_iter(); let path = &decl.path; - quote!{ + quote! 
{ Some(#scrate::metadata::DecodeDifferent::Encode( #scrate::metadata::FnEncode( #path::Pallet::<#runtime #(, #path::#instance)*>::storage_metadata @@ -116,7 +116,7 @@ fn expand_pallet_metadata_calls( let instance = decl.instance.as_ref().into_iter(); let path = &decl.path; - quote!{ + quote! { Some(#scrate::metadata::DecodeDifferent::Encode( #scrate::metadata::FnEncode( #path::Pallet::<#runtime #(, #path::#instance)*>::call_functions @@ -136,8 +136,12 @@ fn expand_pallet_metadata_events( ) -> TokenStream { if filtered_names.contains(&"Event") { let path = &decl.path; - let part_is_generic = - !decl.find_part("Event").expect("Event part exists; qed").generics.params.is_empty(); + let part_is_generic = !decl + .find_part("Event") + .expect("Event part exists; qed") + .generics + .params + .is_empty(); let pallet_event = match (decl.instance.as_ref(), part_is_generic) { (Some(inst), true) => quote!(#path::Event::<#runtime, #path::#inst>), (Some(inst), false) => quote!(#path::Event::<#path::#inst>), @@ -145,7 +149,7 @@ fn expand_pallet_metadata_events( (None, false) => quote!(#path::Event), }; - quote!{ + quote! { Some(#scrate::metadata::DecodeDifferent::Encode( #scrate::metadata::FnEncode(#pallet_event::metadata) )) @@ -163,7 +167,7 @@ fn expand_pallet_metadata_constants( let path = &decl.path; let instance = decl.instance.as_ref().into_iter(); - quote!{ + quote! { #scrate::metadata::DecodeDifferent::Encode( #scrate::metadata::FnEncode( #path::Pallet::<#runtime #(, #path::#instance)*>::module_constants_metadata @@ -180,7 +184,7 @@ fn expand_pallet_metadata_errors( let path = &decl.path; let instance = decl.instance.as_ref().into_iter(); - quote!{ + quote! { #scrate::metadata::DecodeDifferent::Encode( #scrate::metadata::FnEncode( <#path::Pallet::<#runtime #(, #path::#instance)*> as #scrate::metadata::ModuleErrorMetadata>::metadata diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs index 962d2583594093b36f35ce4a7e8f1b4e8c364a7a..5091867eeef5a9653829f325968b40d71c9368de 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -18,7 +18,7 @@ use crate::construct_runtime::{Pallet, SYSTEM_PALLET_NAME}; use proc_macro2::TokenStream; use quote::quote; -use syn::{token, Ident, Generics}; +use syn::{token, Generics, Ident}; pub fn expand_outer_origin( runtime: &Ident, @@ -26,13 +26,14 @@ pub fn expand_outer_origin( pallets_token: token::Brace, scrate: &TokenStream, ) -> syn::Result { - let system_pallet = pallets.iter() - .find(|decl| decl.name == SYSTEM_PALLET_NAME) - .ok_or_else(|| syn::Error::new( - pallets_token.span, - "`System` pallet declaration is missing. \ + let system_pallet = + pallets.iter().find(|decl| decl.name == SYSTEM_PALLET_NAME).ok_or_else(|| { + syn::Error::new( + pallets_token.span, + "`System` pallet declaration is missing. 
\ Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},`", - ))?; + ) + })?; let mut caller_variants = TokenStream::new(); let mut pallet_conversions = TokenStream::new(); @@ -52,15 +53,23 @@ pub fn expand_outer_origin( be constructed: pallet `{}` must have generic `Origin`", name ); - return Err(syn::Error::new(name.span(), msg)); + return Err(syn::Error::new(name.span(), msg)) } - caller_variants.extend( - expand_origin_caller_variant(runtime, pallet_decl, index, instance, generics), - ); - pallet_conversions.extend( - expand_origin_pallet_conversions(scrate, runtime, pallet_decl, instance, generics), - ); + caller_variants.extend(expand_origin_caller_variant( + runtime, + pallet_decl, + index, + instance, + generics, + )); + pallet_conversions.extend(expand_origin_pallet_conversions( + scrate, + runtime, + pallet_decl, + instance, + generics, + )); query_origin_part_macros.push(quote! { #path::__substrate_origin_check::is_origin_part_defined!(#name); }); @@ -270,16 +279,16 @@ fn expand_origin_caller_variant( match instance { Some(inst) if part_is_generic => { quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime, #path::#inst>),) - } + }, Some(inst) => { quote!(#[codec(index = #index)] #variant_name(#path::Origin<#path::#inst>),) - } + }, None if part_is_generic => { quote!(#[codec(index = #index)] #variant_name(#path::Origin<#runtime>),) - } + }, None => { quote!(#[codec(index = #index)] #variant_name(#path::Origin),) - } + }, } } @@ -301,7 +310,7 @@ fn expand_origin_pallet_conversions( None => quote!(#path::Origin), }; - quote!{ + quote! { impl From<#pallet_origin> for OriginCaller { fn from(x: #pallet_origin) -> Self { OriginCaller::#variant_name(x) diff --git a/substrate/frame/support/procedural/src/construct_runtime/mod.rs b/substrate/frame/support/procedural/src/construct_runtime/mod.rs index 6f8924a14bccbeb597ba11511d0b7f96ec1a1573..402cb5458851d67a42804f55299e7b5e6a419a9a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/mod.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/mod.rs @@ -18,14 +18,15 @@ mod expand; mod parse; -use frame_support_procedural_tools::syn_ext as ext; -use frame_support_procedural_tools::{generate_crate_access, generate_hidden_includes}; +use frame_support_procedural_tools::{ + generate_crate_access, generate_hidden_includes, syn_ext as ext, +}; use parse::{PalletDeclaration, PalletPart, PalletPath, RuntimeDefinition, WhereSection}; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use quote::quote; -use syn::{Ident, Result}; use std::collections::HashMap; +use syn::{Ident, Result}; /// The fixed name of the system pallet. 
const SYSTEM_PALLET_NAME: &str = "System"; @@ -65,48 +66,44 @@ fn complete_pallets(decl: impl Iterator) -> syn::Resul let mut last_index: Option = None; let mut names = HashMap::new(); - decl - .map(|pallet| { - let final_index = match pallet.index { - Some(i) => i, - None => last_index.map_or(Some(0), |i| i.checked_add(1)) - .ok_or_else(|| { - let msg = "Pallet index doesn't fit into u8, index is 256"; - syn::Error::new(pallet.name.span(), msg) - })?, - }; - - last_index = Some(final_index); - - if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) { - let msg = format!( - "Pallet indices are conflicting: Both pallets {} and {} are at index {}", - used_pallet, - pallet.name, - final_index, - ); - let mut err = syn::Error::new(used_pallet.span(), &msg); - err.combine(syn::Error::new(pallet.name.span(), msg)); - return Err(err); - } + decl.map(|pallet| { + let final_index = match pallet.index { + Some(i) => i, + None => last_index.map_or(Some(0), |i| i.checked_add(1)).ok_or_else(|| { + let msg = "Pallet index doesn't fit into u8, index is 256"; + syn::Error::new(pallet.name.span(), msg) + })?, + }; - if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) { - let msg = "Two pallets with the same name!"; + last_index = Some(final_index); - let mut err = syn::Error::new(used_pallet, &msg); - err.combine(syn::Error::new(pallet.name.span(), &msg)); - return Err(err); - } + if let Some(used_pallet) = indices.insert(final_index, pallet.name.clone()) { + let msg = format!( + "Pallet indices are conflicting: Both pallets {} and {} are at index {}", + used_pallet, pallet.name, final_index, + ); + let mut err = syn::Error::new(used_pallet.span(), &msg); + err.combine(syn::Error::new(pallet.name.span(), msg)); + return Err(err) + } - Ok(Pallet { - name: pallet.name, - index: final_index, - path: pallet.path, - instance: pallet.instance, - pallet_parts: pallet.pallet_parts, - }) + if let Some(used_pallet) = names.insert(pallet.name.clone(), pallet.name.span()) { + let msg = "Two pallets with the same name!"; + + let mut err = syn::Error::new(used_pallet, &msg); + err.combine(syn::Error::new(pallet.name.span(), &msg)); + return Err(err) + } + + Ok(Pallet { + name: pallet.name, + index: final_index, + path: pallet.path, + instance: pallet.instance, + pallet_parts: pallet.pallet_parts, }) - .collect() + }) + .collect() } pub fn construct_runtime(input: TokenStream) -> TokenStream { @@ -119,17 +116,9 @@ pub fn construct_runtime(input: TokenStream) -> TokenStream { fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result { let RuntimeDefinition { name, - where_section: WhereSection { - block, - node_block, - unchecked_extrinsic, - .. - }, + where_section: WhereSection { block, node_block, unchecked_extrinsic, .. }, pallets: - ext::Braces { - content: ext::Punctuated { inner: pallets, .. }, - token: pallets_token, - }, + ext::Braces { content: ext::Punctuated { inner: pallets, .. }, token: pallets_token }, .. 
} = definition; @@ -148,13 +137,8 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result( let type_name = &pallet_declaration.name; let pallet = &pallet_declaration.path; let mut generics = vec![quote!(#runtime)]; - generics.extend( - pallet_declaration - .instance - .iter() - .map(|name| quote!(#pallet::#name)), - ); + generics.extend(pallet_declaration.instance.iter().map(|name| quote!(#pallet::#name))); let type_decl = quote!( pub type #type_name = #pallet::Pallet <#(#generics),*>; ); @@ -224,11 +203,13 @@ fn decl_all_pallets<'a>( } // Make nested tuple structure like (((Babe, Consensus), Grandpa), ...) // But ignore the system pallet. - let all_pallets = names.iter() + let all_pallets = names + .iter() .filter(|n| **n != SYSTEM_PALLET_NAME) .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - let all_pallets_with_system = names.iter() + let all_pallets_with_system = names + .iter() .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); quote!( @@ -258,8 +239,7 @@ fn decl_pallet_runtime_setup( let names = pallet_declarations.iter().map(|d| &d.name); let names2 = pallet_declarations.iter().map(|d| &d.name); let name_strings = pallet_declarations.iter().map(|d| d.name.to_string()); - let indices = pallet_declarations.iter() - .map(|pallet| pallet.index as usize); + let indices = pallet_declarations.iter().map(|pallet| pallet.index as usize); quote!( /// Provides an implementation of `PalletInfo` to provide information diff --git a/substrate/frame/support/procedural/src/construct_runtime/parse.rs b/substrate/frame/support/procedural/src/construct_runtime/parse.rs index 2d242749cfe0178b30edf72ddb06562fe6c5844b..6f2fd82e73f4bca92ba67f992d1200350268383f 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/parse.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/parse.rs @@ -77,9 +77,9 @@ impl Parse for WhereSection { definitions.push(definition); if !input.peek(Token![,]) { if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")); + return Err(input.error("Expected `,` or `{`")) } - break; + break } input.parse::()?; } @@ -87,23 +87,14 @@ impl Parse for WhereSection { let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; let unchecked_extrinsic = remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; - if let Some(WhereDefinition { - ref kind_span, - ref kind, - .. - }) = definitions.first() - { + if let Some(WhereDefinition { ref kind_span, ref kind, .. }) = definitions.first() { let msg = format!( "`{:?}` was declared above. 
Please use exactly one declaration for `{:?}`.", kind, kind ); - return Err(Error::new(*kind_span, msg)); + return Err(Error::new(*kind_span, msg)) } - Ok(Self { - block, - node_block, - unchecked_extrinsic, - }) + Ok(Self { block, node_block, unchecked_extrinsic }) } } @@ -127,17 +118,11 @@ impl Parse for WhereDefinition { let (kind_span, kind) = if lookahead.peek(keyword::Block) { (input.parse::()?.span(), WhereKind::Block) } else if lookahead.peek(keyword::NodeBlock) { - ( - input.parse::()?.span(), - WhereKind::NodeBlock, - ) + (input.parse::()?.span(), WhereKind::NodeBlock) } else if lookahead.peek(keyword::UncheckedExtrinsic) { - ( - input.parse::()?.span(), - WhereKind::UncheckedExtrinsic, - ) + (input.parse::()?.span(), WhereKind::UncheckedExtrinsic) } else { - return Err(lookahead.error()); + return Err(lookahead.error()) }; Ok(Self { @@ -187,13 +172,7 @@ impl Parse for PalletDeclaration { None }; - let parsed = Self { - name, - path, - instance, - pallet_parts, - index, - }; + let parsed = Self { name, path, instance, pallet_parts, index }; Ok(parsed) } @@ -214,17 +193,17 @@ impl Parse for PalletPath { let mut lookahead = input.lookahead1(); let mut segments = Punctuated::new(); - if lookahead.peek(Token![crate]) - || lookahead.peek(Token![self]) - || lookahead.peek(Token![super]) - || lookahead.peek(Ident) + if lookahead.peek(Token![crate]) || + lookahead.peek(Token![self]) || + lookahead.peek(Token![super]) || + lookahead.peek(Ident) { let ident = input.call(Ident::parse_any)?; segments.push(PathSegment { ident, arguments: PathArguments::None }); let _: Token![::] = input.parse()?; lookahead = input.lookahead1(); } else { - return Err(lookahead.error()); + return Err(lookahead.error()) } while lookahead.peek(Ident) { @@ -235,15 +214,10 @@ impl Parse for PalletPath { } if !lookahead.peek(token::Brace) && !lookahead.peek(Token![<]) { - return Err(lookahead.error()); + return Err(lookahead.error()) } - Ok(Self { - inner: Path { - leading_colon: None, - segments, - } - }) + Ok(Self { inner: Path { leading_colon: None, segments } }) } } @@ -257,7 +231,7 @@ impl quote::ToTokens for PalletPath { /// /// `{ Call, Event }` fn parse_pallet_parts(input: ParseStream) -> Result> { - let pallet_parts :ext::Braces> = input.parse()?; + let pallet_parts: ext::Braces> = input.parse()?; let mut resolved = HashSet::new(); for part in pallet_parts.content.inner.iter() { @@ -266,7 +240,7 @@ fn parse_pallet_parts(input: ParseStream) -> Result> { "`{}` was already declared before. 
Please remove the duplicate declaration", part.name(), ); - return Err(Error::new(part.keyword.span(), msg)); + return Err(Error::new(part.keyword.span(), msg)) } } @@ -371,13 +345,10 @@ impl Parse for PalletPart { keyword.name(), valid_generics, ); - return Err(syn::Error::new(keyword.span(), msg)); + return Err(syn::Error::new(keyword.span(), msg)) } - Ok(Self { - keyword, - generics, - }) + Ok(Self { keyword, generics }) } } diff --git a/substrate/frame/support/procedural/src/debug_no_bound.rs b/substrate/frame/support/procedural/src/debug_no_bound.rs index 7a5509cf986dc8991fb0f2b130191e1b9c3910b5..acfd8d0cabc8a83e88b4f421bdd302f2071debf8 100644 --- a/substrate/frame/support/procedural/src/debug_no_bound.rs +++ b/substrate/frame/support/procedural/src/debug_no_bound.rs @@ -30,9 +30,10 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => .field(stringify!(#i), &self.#i) )); + let fields = + named.named.iter().map(|i| &i.ident).map( + |i| quote::quote_spanned!(i.span() => .field(stringify!(#i), &self.#i) ), + ); quote::quote!( fmt.debug_struct(stringify!(#input_ident)) @@ -41,7 +42,10 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() + let fields = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, _)| syn::Index::from(i)) .map(|i| quote::quote_spanned!(i.span() => .field(&self.#i) )); @@ -51,46 +55,50 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke .finish() ) }, - syn::Fields::Unit => quote::quote!( fmt.write_str(stringify!(#input_ident)) ), + syn::Fields::Unit => quote::quote!(fmt.write_str(stringify!(#input_ident))), }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { - let ident = &variant.ident; - let full_variant_str = format!("{}::{}", input_ident, ident); - match &variant.fields { - syn::Fields::Named(named) => { - let captured = named.named.iter().map(|i| &i.ident); - let debugged = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => - .field(stringify!(#i), &#i) - )); - quote::quote!( - Self::#ident { #( ref #captured, )* } => { - fmt.debug_struct(#full_variant_str) - #( #debugged )* - .finish() - } + let variants = enum_.variants.iter().map(|variant| { + let ident = &variant.ident; + let full_variant_str = format!("{}::{}", input_ident, ident); + match &variant.fields { + syn::Fields::Named(named) => { + let captured = named.named.iter().map(|i| &i.ident); + let debugged = captured.clone().map(|i| { + quote::quote_spanned!(i.span() => + .field(stringify!(#i), &#i) ) - }, - syn::Fields::Unnamed(unnamed) => { - let captured = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let debugged = captured.clone() - .map(|i| quote::quote_spanned!(i.span() => .field(&#i))); - quote::quote!( - Self::#ident ( #( ref #captured, )* ) => { - fmt.debug_tuple(#full_variant_str) - #( #debugged )* - .finish() - } - ) - }, - syn::Fields::Unit => quote::quote!( - Self::#ident => fmt.write_str(#full_variant_str) - ), - } - }); + }); + quote::quote!( + Self::#ident { #( ref #captured, )* } => { + fmt.debug_struct(#full_variant_str) + #( #debugged )* + .finish() + } + ) + }, + 
syn::Fields::Unnamed(unnamed) => { + let captured = unnamed + .unnamed + .iter() + .enumerate() + .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); + let debugged = captured + .clone() + .map(|i| quote::quote_spanned!(i.span() => .field(&#i))); + quote::quote!( + Self::#ident ( #( ref #captured, )* ) => { + fmt.debug_tuple(#full_variant_str) + #( #debugged )* + .finish() + } + ) + }, + syn::Fields::Unit => quote::quote!( + Self::#ident => fmt.write_str(#full_variant_str) + ), + } + }); quote::quote!(match *self { #( #variants, )* @@ -110,5 +118,6 @@ pub fn derive_debug_no_bound(input: proc_macro::TokenStream) -> proc_macro::Toke } } }; - ).into() + ) + .into() } diff --git a/substrate/frame/support/procedural/src/default_no_bound.rs b/substrate/frame/support/procedural/src/default_no_bound.rs index ed35e057f0377049c826b69921037a047b784189..38d6e19b1732f5fe2ea082c07f5c83a954c95987 100644 --- a/substrate/frame/support/procedural/src/default_no_bound.rs +++ b/substrate/frame/support/procedural/src/default_no_bound.rs @@ -30,56 +30,60 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => #i: core::default::Default::default() - )); + ) + }); quote::quote!( Self { #( #fields, )* } ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() - .map(|(i, _)| syn::Index::from(i)) - .map(|i| quote::quote_spanned!(i.span() => - core::default::Default::default() - )); + let fields = + unnamed.unnamed.iter().enumerate().map(|(i, _)| syn::Index::from(i)).map(|i| { + quote::quote_spanned!(i.span() => + core::default::Default::default() + ) + }); quote::quote!( Self ( #( #fields, )* ) ) }, syn::Fields::Unit => { - quote::quote!( Self ) - } + quote::quote!(Self) + }, }, - syn::Data::Enum(enum_) => { + syn::Data::Enum(enum_) => if let Some(first_variant) = enum_.variants.first() { let variant_ident = &first_variant.ident; match &first_variant.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() - .map(|i| &i.ident) - .map(|i| quote::quote_spanned!(i.span() => + let fields = named.named.iter().map(|i| &i.ident).map(|i| { + quote::quote_spanned!(i.span() => #i: core::default::Default::default() - )); + ) + }); quote::quote!( #name :: #ty_generics :: #variant_ident { #( #fields, )* } ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() + let fields = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, _)| syn::Index::from(i)) - .map(|i| quote::quote_spanned!(i.span() => - core::default::Default::default() - )); + .map(|i| { + quote::quote_spanned!(i.span() => + core::default::Default::default() + ) + }); quote::quote!( #name :: #ty_generics :: #variant_ident ( #( #fields, )* ) ) }, syn::Fields::Unit => quote::quote!( #name :: #ty_generics :: #variant_ident ), } } else { - quote::quote!( Self ) - } - - }, + quote::quote!(Self) + }, syn::Data::Union(_) => { let msg = "Union type not supported by `derive(CloneNoBound)`"; return syn::Error::new(input.span(), msg).to_compile_error().into() @@ -94,5 +98,6 @@ pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::To } } }; - ).into() + ) + .into() } diff --git 
a/substrate/frame/support/procedural/src/dummy_part_checker.rs b/substrate/frame/support/procedural/src/dummy_part_checker.rs index f1649aebe970f636f8a6f1d3ed0596b7a8a1098d..792b17a8f775855eb4f277f76b10bbdd16c44e21 100644 --- a/substrate/frame/support/procedural/src/dummy_part_checker.rs +++ b/substrate/frame/support/procedural/src/dummy_part_checker.rs @@ -1,18 +1,17 @@ -use proc_macro::TokenStream; use crate::COUNTER; +use proc_macro::TokenStream; pub fn generate_dummy_part_checker(input: TokenStream) -> TokenStream { if !input.is_empty() { return syn::Error::new(proc_macro2::Span::call_site(), "No arguments expected") - .to_compile_error().into() + .to_compile_error() + .into() } let count = COUNTER.with(|counter| counter.borrow_mut().inc()); - let no_op_macro_ident = syn::Ident::new( - &format!("__dummy_part_checker_{}", count), - proc_macro2::Span::call_site(), - ); + let no_op_macro_ident = + syn::Ident::new(&format!("__dummy_part_checker_{}", count), proc_macro2::Span::call_site()); quote::quote!( #[macro_export] @@ -58,5 +57,6 @@ pub fn generate_dummy_part_checker(input: TokenStream) -> TokenStream { #[doc(hidden)] pub use #no_op_macro_ident as is_origin_part_defined; } - ).into() + ) + .into() } diff --git a/substrate/frame/support/procedural/src/key_prefix.rs b/substrate/frame/support/procedural/src/key_prefix.rs index 17c310c2bcadc96c129e8c5ae0921da2da3301ef..c4683bc456daf9b46d95639ddc36dec718ae3173 100644 --- a/substrate/frame/support/procedural/src/key_prefix.rs +++ b/substrate/frame/support/procedural/src/key_prefix.rs @@ -16,14 +16,14 @@ // limitations under the License. use proc_macro2::{Span, TokenStream}; -use quote::{ToTokens, format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; use syn::{Ident, Result}; const MAX_IDENTS: usize = 18; pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result { if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")); + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) } let mut all_trait_impls = TokenStream::new(); @@ -36,13 +36,17 @@ pub fn impl_key_prefix_for_tuples(input: proc_macro::TokenStream) -> Result>(); - let kargs = prefixes.iter().map(|ident| format_ident!("KArg{}", ident)).collect::>(); + let hashers = current_tuple + .iter() + .map(|ident| format_ident!("Hasher{}", ident)) + .collect::>(); + let kargs = + prefixes.iter().map(|ident| format_ident!("KArg{}", ident)).collect::>(); let partial_keygen = generate_keygen(prefixes); let suffix_keygen = generate_keygen(suffixes); let suffix_tuple = generate_tuple(suffixes); - let trait_impls = quote!{ + let trait_impls = quote! 
{ impl< #(#current_tuple: FullCodec,)* #(#hashers: StorageHasher,)* diff --git a/substrate/frame/support/procedural/src/lib.rs b/substrate/frame/support/procedural/src/lib.rs index 9ac648f5e795a3619f060a7b7701f8dd9604c3b9..ab9ea156347921db1e54b5dc67db6f5be8b2845f 100644 --- a/substrate/frame/support/procedural/src/lib.rs +++ b/substrate/frame/support/procedural/src/lib.rs @@ -19,21 +19,21 @@ #![recursion_limit = "512"] -mod storage; +mod clone_no_bound; mod construct_runtime; -mod pallet; -mod pallet_version; -mod transactional; mod debug_no_bound; -mod clone_no_bound; -mod partial_eq_no_bound; mod default_no_bound; -mod key_prefix; mod dummy_part_checker; +mod key_prefix; +mod pallet; +mod pallet_version; +mod partial_eq_no_bound; +mod storage; +mod transactional; -pub(crate) use storage::INHERENT_INSTANCE_NAME; use proc_macro::TokenStream; use std::cell::RefCell; +pub(crate) use storage::INHERENT_INSTANCE_NAME; thread_local! { /// A global counter, can be used to generate a relatively unique identifier. @@ -200,14 +200,14 @@ impl Counter { /// /// // Your storage items /// } -/// add_extra_genesis { -/// config(genesis_field): GenesisFieldType; -/// config(genesis_field2): GenesisFieldType; -/// ... -/// build(|_: &Self| { -/// // Modification of storage -/// }) -/// } +/// add_extra_genesis { +/// config(genesis_field): GenesisFieldType; +/// config(genesis_field2): GenesisFieldType; +/// ... +/// build(|_: &Self| { +/// // Modification of storage +/// }) +/// } /// } /// ``` /// @@ -219,7 +219,7 @@ impl Counter { /// ..., /// Example: example::{Pallet, Storage, ..., Config}, /// ..., -/// } +/// } /// ); /// ``` /// @@ -413,7 +413,8 @@ pub fn derive_runtime_debug_no_bound(input: TokenStream) -> TokenStream { } } }; - ).into() + ) + .into() } #[cfg(feature = "std")] @@ -444,7 +445,8 @@ pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { const _: () = { impl #impl_generics core::cmp::Eq for #name #ty_generics #where_clause {} }; - ).into() + ) + .into() } /// derive `Default` but do no bound any generic. Docs are at `frame_support::DefaultNoBound`. @@ -455,12 +457,15 @@ pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { - transactional::require_transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) + transactional::require_transactional(attr, input) + .unwrap_or_else(|e| e.to_compile_error().into()) } #[proc_macro] pub fn crate_to_pallet_version(input: TokenStream) -> TokenStream { - pallet_version::crate_to_pallet_version(input).unwrap_or_else(|e| e.to_compile_error()).into() + pallet_version::crate_to_pallet_version(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } /// The number of module instances supported by the runtime, starting at index 1, @@ -471,7 +476,9 @@ pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; /// It implements the trait `HasKeyPrefix` and `HasReversibleKeyPrefix` for tuple of `Key`. 
#[proc_macro] pub fn impl_key_prefix_for_tuples(input: TokenStream) -> TokenStream { - key_prefix::impl_key_prefix_for_tuples(input).unwrap_or_else(syn::Error::into_compile_error).into() + key_prefix::impl_key_prefix_for_tuples(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() } /// Internal macro use by frame_support to generate dummy part checker for old pallet declaration diff --git a/substrate/frame/support/procedural/src/pallet/expand/call.rs b/substrate/frame/support/procedural/src/pallet/expand/call.rs index 28280a5e892209c55abb28d5fd7a0fc669a0b22a..4dcee9e24fe39e9bb08a185df418a5d6230d0d40 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/call.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/call.rs @@ -15,9 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::Def; +use crate::{pallet::Def, COUNTER}; use frame_support_procedural_tools::clean_type_string; -use crate::COUNTER; use syn::spanned::Spanned; /// * Generate enum call and implement various trait on it. @@ -31,7 +30,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let docs = call.docs.clone(); (span, where_clause, methods, docs) - } + }, None => (def.item.span(), None, Vec::new(), Vec::new()), }; let frame_support = &def.frame_support; @@ -48,16 +47,20 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let fn_doc = methods.iter().map(|method| &method.docs).collect::<Vec<_>>(); - let args_name = methods.iter() + let args_name = methods + .iter() .map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>()) .collect::<Vec<_>>(); - let args_type = methods.iter() + let args_type = methods + .iter() .map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>()) .collect::<Vec<_>>(); let args_compact_attr = methods.iter().map(|method| { - method.args.iter() + method + .args + .iter() .map(|(is_compact, _, type_)| { if *is_compact { quote::quote_spanned!(type_.span() => #[codec(compact)] ) @@ -69,7 +72,9 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { }); let args_metadata_type = methods.iter().map(|method| { - method.args.iter() + method + .args + .iter() .map(|(is_compact, _, type_)| { let final_type = if *is_compact { quote::quote_spanned!(type_.span() => Compact<#type_>) @@ -84,14 +89,10 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { let default_docs = [syn::parse_quote!( r"Contains one variant per dispatchable that can be called by an extrinsic." )]; - let docs = if docs.is_empty() { - &default_docs[..] - } else { - &docs[..] - }; + let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] }; let maybe_compile_error = if def.call.is_none() { - quote::quote!{ + quote::quote! { compile_error!(concat!( "`", stringify!($pallet_name), diff --git a/substrate/frame/support/procedural/src/pallet/expand/config.rs b/substrate/frame/support/procedural/src/pallet/expand/config.rs index 1e60313c55317f8d4c3d58c49209b20dac134699..306578cc3adc60c1d2ce60bbe9c41d4990ecc981 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/config.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/config.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
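For context on the hunks above and below: the hidden `__dummy_part_checker_{}` and `__is_*_part_defined_{}` macro names are all minted from the crate's thread-local `COUNTER`. A minimal sketch of that pattern, with simplified names (the real crate wraps the count in a small `Counter` struct with an `inc()` method, as seen in lib.rs above):

    use std::cell::RefCell;

    thread_local! {
        // One counter per expansion thread; each call hands out a fresh id.
        static COUNTER: RefCell<u64> = RefCell::new(0);
    }

    fn unique_macro_ident(prefix: &str, span: proc_macro2::Span) -> syn::Ident {
        let n = COUNTER.with(|c| {
            let mut c = c.borrow_mut();
            *c += 1;
            *c
        });
        // e.g. `__dummy_part_checker_3` for prefix `__dummy_part_checker`.
        syn::Ident::new(&format!("{}_{}", prefix, n), span)
    }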
-use crate::pallet::{Def, parse::helper::get_doc_literals}; +use crate::pallet::{parse::helper::get_doc_literals, Def}; /// * Generate default rust doc pub fn expand_config(def: &mut Def) -> proc_macro2::TokenStream { diff --git a/substrate/frame/support/procedural/src/pallet/expand/constants.rs b/substrate/frame/support/procedural/src/pallet/expand/constants.rs index e5acf42270aa747d8fcf90fc94530eba62f154ff..58df22e361c46b146ebac4792cad89c9f38d75a0 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/constants.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/constants.rs @@ -71,58 +71,55 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { } }); - let consts = config_consts.chain(extra_consts) - .map(|const_| { - let const_type = &const_.type_; - let const_type_str = clean_type_string(&const_type.to_token_stream().to_string()); - let ident = &const_.ident; - let ident_str = format!("{}", ident); - let doc = const_.doc.clone().into_iter(); - let default_byte_impl = &const_.default_byte_impl; - let default_byte_getter = syn::Ident::new( - &format!("{}DefaultByteGetter", ident), - ident.span() + let consts = config_consts.chain(extra_consts).map(|const_| { + let const_type = &const_.type_; + let const_type_str = clean_type_string(&const_type.to_token_stream().to_string()); + let ident = &const_.ident; + let ident_str = format!("{}", ident); + let doc = const_.doc.clone().into_iter(); + let default_byte_impl = &const_.default_byte_impl; + let default_byte_getter = + syn::Ident::new(&format!("{}DefaultByteGetter", ident), ident.span()); + + quote::quote!({ + #[allow(non_upper_case_types)] + #[allow(non_camel_case_types)] + struct #default_byte_getter<#type_decl_gen>( + #frame_support::sp_std::marker::PhantomData<(#type_use_gen)> ); - quote::quote!({ - #[allow(non_upper_case_types)] - #[allow(non_camel_case_types)] - struct #default_byte_getter<#type_decl_gen>( - #frame_support::sp_std::marker::PhantomData<(#type_use_gen)> - ); - - impl<#type_impl_gen> #frame_support::dispatch::DefaultByte for - #default_byte_getter<#type_use_gen> - #completed_where_clause - { - fn default_byte(&self) -> #frame_support::sp_std::vec::Vec { - #default_byte_impl - } + impl<#type_impl_gen> #frame_support::dispatch::DefaultByte for + #default_byte_getter<#type_use_gen> + #completed_where_clause + { + fn default_byte(&self) -> #frame_support::sp_std::vec::Vec { + #default_byte_impl } + } - unsafe impl<#type_impl_gen> Send for #default_byte_getter<#type_use_gen> - #completed_where_clause - {} - unsafe impl<#type_impl_gen> Sync for #default_byte_getter<#type_use_gen> - #completed_where_clause - {} - - #frame_support::dispatch::ModuleConstantMetadata { - name: #frame_support::dispatch::DecodeDifferent::Encode(#ident_str), - ty: #frame_support::dispatch::DecodeDifferent::Encode(#const_type_str), - value: #frame_support::dispatch::DecodeDifferent::Encode( - #frame_support::dispatch::DefaultByteGetter( - &#default_byte_getter::<#type_use_gen>( - #frame_support::sp_std::marker::PhantomData - ) + unsafe impl<#type_impl_gen> Send for #default_byte_getter<#type_use_gen> + #completed_where_clause + {} + unsafe impl<#type_impl_gen> Sync for #default_byte_getter<#type_use_gen> + #completed_where_clause + {} + + #frame_support::dispatch::ModuleConstantMetadata { + name: #frame_support::dispatch::DecodeDifferent::Encode(#ident_str), + ty: #frame_support::dispatch::DecodeDifferent::Encode(#const_type_str), + value: #frame_support::dispatch::DecodeDifferent::Encode( + 
#frame_support::dispatch::DefaultByteGetter( + &#default_byte_getter::<#type_use_gen>( + #frame_support::sp_std::marker::PhantomData ) - ), - documentation: #frame_support::dispatch::DecodeDifferent::Encode( - &[ #( #doc ),* ] - ), - } - }) - }); + ) + ), + documentation: #frame_support::dispatch::DecodeDifferent::Encode( + &[ #( #doc ),* ] + ), + } + }) + }); quote::quote!( impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause{ diff --git a/substrate/frame/support/procedural/src/pallet/expand/error.rs b/substrate/frame/support/procedural/src/pallet/expand/error.rs index 000f476d94d8bf47c6b85fe1e60cc264366c9164..ce3d3428fc6ee9d224a851330f094708589a58a9 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/error.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/error.rs @@ -15,16 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::{Def, parse::helper::get_doc_literals}; +use crate::pallet::{parse::helper::get_doc_literals, Def}; /// * impl various trait on Error /// * impl ModuleErrorMetadata for Error pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { - let error = if let Some(error) = &def.error { - error - } else { - return Default::default() - }; + let error = if let Some(error) = &def.error { error } else { return Default::default() }; let error_ident = &error.error; let frame_support = &def.frame_support; @@ -41,27 +37,24 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { ) ); - let as_u8_matches = error.variants.iter().enumerate() - .map(|(i, (variant, _))| { - quote::quote_spanned!(error.attr_span => Self::#variant => #i as u8,) - }); - - let as_str_matches = error.variants.iter() - .map(|(variant, _)| { - let variant_str = format!("{}", variant); - quote::quote_spanned!(error.attr_span => Self::#variant => #variant_str,) - }); - - let metadata = error.variants.iter() - .map(|(variant, doc)| { - let variant_str = format!("{}", variant); - quote::quote_spanned!(error.attr_span => - #frame_support::error::ErrorMetadata { - name: #frame_support::error::DecodeDifferent::Encode(#variant_str), - documentation: #frame_support::error::DecodeDifferent::Encode(&[ #( #doc, )* ]), - }, - ) - }); + let as_u8_matches = error.variants.iter().enumerate().map( + |(i, (variant, _))| quote::quote_spanned!(error.attr_span => Self::#variant => #i as u8,), + ); + + let as_str_matches = error.variants.iter().map(|(variant, _)| { + let variant_str = format!("{}", variant); + quote::quote_spanned!(error.attr_span => Self::#variant => #variant_str,) + }); + + let metadata = error.variants.iter().map(|(variant, doc)| { + let variant_str = format!("{}", variant); + quote::quote_spanned!(error.attr_span => + #frame_support::error::ErrorMetadata { + name: #frame_support::error::DecodeDifferent::Encode(#variant_str), + documentation: #frame_support::error::DecodeDifferent::Encode(&[ #( #doc, )* ]), + }, + ) + }); let error_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[error.index]; diff --git a/substrate/frame/support/procedural/src/pallet/expand/event.rs b/substrate/frame/support/procedural/src/pallet/expand/event.rs index d932206be09f46ea45efc6b9349f0ed812517ab9..08e59ae7e87789e50f1a5acb03aab93c38fc7d9d 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/event.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/event.rs @@ -15,8 +15,10 @@ // See the License for the specific language governing 
permissions and // limitations under the License. -use crate::pallet::{Def, parse::helper::get_doc_literals}; -use crate::COUNTER; +use crate::{ + pallet::{parse::helper::get_doc_literals, Def}, + COUNTER, +}; use syn::{spanned::Spanned, Ident}; /// * Add __Ignore variant on Event @@ -29,10 +31,8 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { let ident = Ident::new(&format!("__is_event_part_defined_{}", count), event.attr_span); (event, ident) } else { - let macro_ident = Ident::new( - &format!("__is_event_part_defined_{}", count), - def.item.span(), - ); + let macro_ident = + Ident::new(&format!("__is_event_part_defined_{}", count), def.item.span()); return quote::quote! { #[doc(hidden)] @@ -49,42 +49,39 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { )); } } - + #[doc(hidden)] pub use #macro_ident as is_event_part_defined; } - }; + } }; let event_where_clause = &event.where_clause; // NOTE: actually event where clause must be a subset of config where clause because of // `type Event: From>`. But we merge either way for potential better error message - let completed_where_clause = super::merge_where_clauses(&[ - &event.where_clause, - &def.config.where_clause, - ]); + let completed_where_clause = + super::merge_where_clauses(&[&event.where_clause, &def.config.where_clause]); let event_ident = &event.event; let frame_system = &def.frame_system; let frame_support = &def.frame_support; let event_use_gen = &event.gen_kind.type_use_gen(event.attr_span); - let event_impl_gen= &event.gen_kind.type_impl_gen(event.attr_span); - let metadata = event.metadata.iter() - .map(|(ident, args, docs)| { - let name = format!("{}", ident); - quote::quote_spanned!(event.attr_span => - #frame_support::event::EventMetadata { - name: #frame_support::event::DecodeDifferent::Encode(#name), - arguments: #frame_support::event::DecodeDifferent::Encode(&[ - #( #args, )* - ]), - documentation: #frame_support::event::DecodeDifferent::Encode(&[ - #( #docs, )* - ]), - }, - ) - }); + let event_impl_gen = &event.gen_kind.type_impl_gen(event.attr_span); + let metadata = event.metadata.iter().map(|(ident, args, docs)| { + let name = format!("{}", ident); + quote::quote_spanned!(event.attr_span => + #frame_support::event::EventMetadata { + name: #frame_support::event::DecodeDifferent::Encode(#name), + arguments: #frame_support::event::DecodeDifferent::Encode(&[ + #( #args, )* + ]), + documentation: #frame_support::event::DecodeDifferent::Encode(&[ + #( #docs, )* + ]), + }, + ) + }); let event_item = { let item = &mut def.item.content.as_mut().expect("Checked by def parser").1[event.index]; @@ -166,7 +163,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { macro_rules! 
#macro_ident { ($pallet_name:ident) => {}; } - + #[doc(hidden)] pub use #macro_ident as is_event_part_defined; } diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs index 374d21001d6a165b41fb5055513df5378571ca4a..c68f2339cfced3d722fdf8a981543eb9d4e74174 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_build.rs @@ -40,8 +40,8 @@ pub fn expand_genesis_build(def: &mut Def) -> proc_macro2::TokenStream { let gen_cfg_use_gen = genesis_config.gen_kind.type_use_gen(genesis_build.attr_span); - let genesis_build_item = &mut def.item.content.as_mut() - .expect("Checked by def parser").1[genesis_build.index]; + let genesis_build_item = + &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_build.index]; let genesis_build_item_impl = if let syn::Item::Impl(impl_) = genesis_build_item { impl_ diff --git a/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs b/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs index 013b9016c2f4b7276b37f2acc679cdcd91010878..b26be2b34aa7ae2007e224bb0c1bf41391d380cf 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -15,9 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::{Def, parse::helper::get_doc_literals}; -use crate::COUNTER; -use syn::{Ident, spanned::Spanned}; +use crate::{ + pallet::{parse::helper::get_doc_literals, Def}, + COUNTER, +}; +use syn::{spanned::Spanned, Ident}; /// * add various derive trait on GenesisConfig struct. pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { @@ -37,15 +39,11 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { (genesis_config, def_macro_ident, std_macro_ident) } else { - let def_macro_ident = Ident::new( - &format!("__is_genesis_config_defined_{}", count), - def.item.span(), - ); + let def_macro_ident = + Ident::new(&format!("__is_genesis_config_defined_{}", count), def.item.span()); - let std_macro_ident = Ident::new( - &format!("__is_std_enabled_for_genesis_{}", count), - def.item.span(), - ); + let std_macro_ident = + Ident::new(&format!("__is_std_enabled_for_genesis_{}", count), def.item.span()); return quote::quote! { #[doc(hidden)] @@ -74,18 +72,18 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { #[doc(hidden)] pub use #std_macro_ident as is_std_enabled_for_genesis; } - }; + } }; let frame_support = &def.frame_support; - let genesis_config_item = &mut def.item.content.as_mut() - .expect("Checked by def parser").1[genesis_config.index]; + let genesis_config_item = + &mut def.item.content.as_mut().expect("Checked by def parser").1[genesis_config.index]; let serde_crate = format!("{}::serde", frame_support); match genesis_config_item { - syn::Item::Enum(syn::ItemEnum { attrs, ..}) | + syn::Item::Enum(syn::ItemEnum { attrs, .. }) | syn::Item::Struct(syn::ItemStruct { attrs, .. }) | syn::Item::Type(syn::ItemType { attrs, .. 
}) => { if get_doc_literals(&attrs).is_empty() { diff --git a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs index 6e21c892d8ebb3545a4b8dbab63e0868cbd62779..c279a83d3daaf5ac008e0c4aa28ec046028034fa 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/hooks.rs @@ -59,7 +59,7 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { let hooks_impl = if def.hooks.is_none() { let frame_system = &def.frame_system; - quote::quote!{ + quote::quote! { impl<#type_impl_gen> #frame_support::traits::Hooks<::BlockNumber> for Pallet<#type_use_gen> {} diff --git a/substrate/frame/support/procedural/src/pallet/expand/inherent.rs b/substrate/frame/support/procedural/src/pallet/expand/inherent.rs index f1d58b28a5142cfd3f8d13452b6ad204c9b30b66..185211ecd4df2941d9d272bcb8749d82056c6364 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/inherent.rs @@ -15,11 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::Def; +use crate::{pallet::Def, COUNTER}; use proc_macro2::TokenStream; use quote::quote; -use crate::COUNTER; -use syn::{Ident, spanned::Spanned}; +use syn::{spanned::Spanned, Ident}; pub fn expand_inherents(def: &mut Def) -> TokenStream { let count = COUNTER.with(|counter| counter.borrow_mut().inc()); @@ -48,7 +47,7 @@ pub fn expand_inherents(def: &mut Def) -> TokenStream { #maybe_compile_error } } - + #[doc(hidden)] pub use #macro_ident as is_inherent_part_defined; } diff --git a/substrate/frame/support/procedural/src/pallet/expand/instances.rs b/substrate/frame/support/procedural/src/pallet/expand/instances.rs index 9f48563ab7e6c55d363f269fba673c3fb95cf46b..ceb86fcad7ea824420a4abea581cc02e614ed47f 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/instances.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/instances.rs @@ -15,9 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::{pallet::Def, NUMBER_OF_INSTANCE}; use proc_macro2::Span; -use crate::pallet::Def; -use crate::NUMBER_OF_INSTANCE; /// * Provide inherent instance to be used by construct_runtime /// * Provide Instance1 ..= Instance16 for instantiable pallet @@ -25,7 +24,9 @@ pub fn expand_instances(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let inherent_ident = syn::Ident::new(crate::INHERENT_INSTANCE_NAME, Span::call_site()); let instances = if def.config.has_instance { - (1..=NUMBER_OF_INSTANCE).map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())).collect() + (1..=NUMBER_OF_INSTANCE) + .map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())) + .collect() } else { vec![] }; diff --git a/substrate/frame/support/procedural/src/pallet/expand/mod.rs b/substrate/frame/support/procedural/src/pallet/expand/mod.rs index f3a42dfa868b233e733c5890e6c06b2225b79aa0..cfb61e700ac293641e6a3d5b03fc4fd80482ed72 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/mod.rs @@ -15,24 +15,24 @@ // See the License for the specific language governing permissions and // limitations under the License. 
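A note on the `expand_instances` hunk just above, before the module reordering that follows: it builds the `Instance1 ..= Instance16` identifiers in a single iterator chain. A standalone sketch of that logic (`NUMBER_OF_INSTANCE` is 16 in this crate; the free function here is hypothetical):

    use proc_macro2::Span;

    fn instance_idents(number_of_instance: u8) -> Vec<syn::Ident> {
        // Instance1, Instance2, ... one ident per supported pallet instance.
        (1..=number_of_instance)
            .map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site()))
            .collect()
    }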
-mod constants; -mod pallet_struct; mod call; mod config; +mod constants; mod error; mod event; -mod storage; +mod genesis_build; +mod genesis_config; mod hooks; -mod store_trait; mod inherent; mod instances; -mod genesis_build; -mod genesis_config; -mod type_value; mod origin; +mod pallet_struct; +mod storage; +mod store_trait; +mod type_value; mod validate_unsigned; -use crate::pallet::{Def, parse::helper::get_doc_literals}; +use crate::pallet::{parse::helper::get_doc_literals, Def}; use quote::ToTokens; /// Merge where clause together, `where` token span is taken from the first not none one. @@ -97,7 +97,11 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { #validate_unsigned ); - def.item.content.as_mut().expect("This is checked by parsing").1 + def.item + .content + .as_mut() + .expect("This is checked by parsing") + .1 .push(syn::Item::Verbatim(new_items)); def.item.into_token_stream() diff --git a/substrate/frame/support/procedural/src/pallet/expand/origin.rs b/substrate/frame/support/procedural/src/pallet/expand/origin.rs index 578c641b43e41e4efc9ffab1cf42acb9228ed4c1..987512f69a02b2e4116d2f315dc20df251b7eedb 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/origin.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/origin.rs @@ -18,7 +18,7 @@ use crate::{pallet::Def, COUNTER}; use proc_macro2::TokenStream; use quote::quote; -use syn::{Ident, spanned::Spanned}; +use syn::{spanned::Spanned, Ident}; pub fn expand_origins(def: &mut Def) -> TokenStream { let count = COUNTER.with(|counter| counter.borrow_mut().inc()); @@ -47,7 +47,7 @@ pub fn expand_origins(def: &mut Def) -> TokenStream { #maybe_compile_error } } - + #[doc(hidden)] pub use #macro_ident as is_origin_part_defined; } diff --git a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs index 3be9d60492e9d5522260e924bbeab92d20a9d955..8be933fc3cf9d936f6aeccd95361548ca609cf0b 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::{Def, expand::merge_where_clauses, parse::helper::get_doc_literals}; +use crate::pallet::{expand::merge_where_clauses, parse::helper::get_doc_literals, Def}; /// * Add derive trait on Pallet /// * Implement GetPalletVersion on Pallet @@ -104,29 +104,25 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { // Depending on the flag `generate_storage_info` we use partial or full storage info from // storage. 
- let ( - storage_info_span, - storage_info_trait, - storage_info_method, - ) = if let Some(span) = def.pallet_struct.generate_storage_info { - ( - span, - quote::quote_spanned!(span => StorageInfoTrait), - quote::quote_spanned!(span => storage_info), - ) - } else { - let span = def.pallet_struct.attr_span; - ( - span, - quote::quote_spanned!(span => PartialStorageInfoTrait), - quote::quote_spanned!(span => partial_storage_info), - ) - }; + let (storage_info_span, storage_info_trait, storage_info_method) = + if let Some(span) = def.pallet_struct.generate_storage_info { + ( + span, + quote::quote_spanned!(span => StorageInfoTrait), + quote::quote_spanned!(span => storage_info), + ) + } else { + let span = def.pallet_struct.attr_span; + ( + span, + quote::quote_spanned!(span => PartialStorageInfoTrait), + quote::quote_spanned!(span => partial_storage_info), + ) + }; let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::<Vec<_>>(); - let storage_cfg_attrs = &def.storages.iter() - .map(|storage| &storage.cfg_attrs) - .collect::<Vec<_>>(); + let storage_cfg_attrs = + &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::<Vec<_>>(); let storage_info = quote::quote_spanned!(storage_info_span => impl<#type_impl_gen> #frame_support::traits::StorageInfoTrait diff --git a/substrate/frame/support/procedural/src/pallet/expand/storage.rs b/substrate/frame/support/procedural/src/pallet/expand/storage.rs index 0000051dd9b94c054b7c05f246ddd991c0e34467..21d6628c8b84d5e53ed9f20393b6c737a2b5f980 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/storage.rs @@ -15,8 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::{Def, parse::storage::StorageDef}; -use crate::pallet::parse::storage::{Metadata, QueryKind, StorageGenerics}; +use crate::pallet::{ + parse::storage::{Metadata, QueryKind, StorageDef, StorageGenerics}, + Def, +}; use frame_support_procedural_tools::clean_type_string; use std::collections::HashSet; @@ -30,10 +32,7 @@ fn prefix_ident(storage: &StorageDef) -> syn::Ident { /// Check for duplicated storage prefixes. This step is necessary since users can specify an /// alternative storage prefix using the #[pallet::storage_prefix] syntax, and we need to ensure /// that the prefix specified by the user is not a duplicate of an existing one.
-fn check_prefix_duplicates( - storage_def: &StorageDef, - set: &mut HashSet<String>, -) -> syn::Result<()> { +fn check_prefix_duplicates(storage_def: &StorageDef, set: &mut HashSet<String>) -> syn::Result<()> { let prefix = storage_def.prefix(); if !set.insert(prefix.clone()) { @@ -41,7 +40,7 @@ fn check_prefix_duplicates( storage_def.prefix_span(), format!("Duplicate storage prefixes found for `{}`", prefix), ); - return Err(err); + return Err(err) } Ok(()) @@ -85,10 +84,8 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { let default_query_kind: syn::Type = syn::parse_quote!(#frame_support::storage::types::OptionQuery); - let default_on_empty: syn::Type = - syn::parse_quote!(#frame_support::traits::GetDefault); - let default_max_values: syn::Type = - syn::parse_quote!(#frame_support::traits::GetDefault); + let default_on_empty: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); + let default_max_values: syn::Type = syn::parse_quote!(#frame_support::traits::GetDefault); if let Some(named_generics) = storage_def.named_generics.clone() { args.args.clear(); @@ -100,7 +97,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(query_kind)); let on_empty = on_empty.unwrap_or_else(|| default_on_empty.clone()); args.args.push(syn::GenericArgument::Type(on_empty)); - } + }, StorageGenerics::Map { hasher, key, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(hasher)); args.args.push(syn::GenericArgument::Type(key)); @@ -111,9 +108,16 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); - } + }, StorageGenerics::DoubleMap { - hasher1, key1, hasher2, key2, value, query_kind, on_empty, max_values, + hasher1, + key1, + hasher2, + key2, + value, + query_kind, + on_empty, + max_values, } => { args.args.push(syn::GenericArgument::Type(hasher1)); args.args.push(syn::GenericArgument::Type(key1)); @@ -126,8 +130,8 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); - } - StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values, } => { + }, + StorageGenerics::NMap { keygen, value, query_kind, on_empty, max_values } => { args.args.push(syn::GenericArgument::Type(keygen)); args.args.push(syn::GenericArgument::Type(value)); let query_kind = query_kind.unwrap_or_else(|| default_query_kind.clone()); @@ -136,7 +140,7 @@ pub fn process_generics(def: &mut Def) -> syn::Result<()> { args.args.push(syn::GenericArgument::Type(on_empty)); let max_values = max_values.unwrap_or_else(|| default_max_values.clone()); args.args.push(syn::GenericArgument::Type(max_values)); - } + }, } } else { args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); @@ -154,118 +158,116 @@ /// * generate metadatas pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { if let Err(e) = process_generics(def) { - return e.into_compile_error().into(); + return e.into_compile_error().into() } let frame_support = &def.frame_support; let frame_system = &def.frame_system; let pallet_ident = &def.pallet_struct.pallet; + let entries = def.storages.iter().map(|storage| { +
let docs = &storage.docs; + + let ident = &storage.ident; + let gen = &def.type_use_generics(storage.attr_span); + let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); + + let cfg_attrs = &storage.cfg_attrs; + + let metadata_trait = match &storage.metadata { + Metadata::Value { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageValueMetadata + ), + Metadata::Map { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageMapMetadata + ), + Metadata::DoubleMap { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageDoubleMapMetadata + ), + Metadata::NMap { .. } => quote::quote_spanned!(storage.attr_span => + #frame_support::storage::types::StorageNMapMetadata + ), + }; - let entries = def.storages.iter() - .map(|storage| { - let docs = &storage.docs; - - let ident = &storage.ident; - let gen = &def.type_use_generics(storage.attr_span); - let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); - - let cfg_attrs = &storage.cfg_attrs; + let ty = match &storage.metadata { + Metadata::Value { value } => { + let value = clean_type_string(&quote::quote!(#value).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::Plain( + #frame_support::metadata::DecodeDifferent::Encode(#value) + ) + ) + }, + Metadata::Map { key, value } => { + let value = clean_type_string(&quote::quote!(#value).to_string()); + let key = clean_type_string(&quote::quote!(#key).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::Map { + hasher: <#full_ident as #metadata_trait>::HASHER, + key: #frame_support::metadata::DecodeDifferent::Encode(#key), + value: #frame_support::metadata::DecodeDifferent::Encode(#value), + unused: false, + } + ) + }, + Metadata::DoubleMap { key1, key2, value } => { + let value = clean_type_string(&quote::quote!(#value).to_string()); + let key1 = clean_type_string(&quote::quote!(#key1).to_string()); + let key2 = clean_type_string(&quote::quote!(#key2).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::DoubleMap { + hasher: <#full_ident as #metadata_trait>::HASHER1, + key2_hasher: <#full_ident as #metadata_trait>::HASHER2, + key1: #frame_support::metadata::DecodeDifferent::Encode(#key1), + key2: #frame_support::metadata::DecodeDifferent::Encode(#key2), + value: #frame_support::metadata::DecodeDifferent::Encode(#value), + } + ) + }, + Metadata::NMap { keys, value, .. } => { + let keys = keys + .iter() + .map(|key| clean_type_string(&quote::quote!(#key).to_string())) + .collect::<Vec<_>>(); + let value = clean_type_string(&quote::quote!(#value).to_string()); + quote::quote_spanned!(storage.attr_span => + #frame_support::metadata::StorageEntryType::NMap { + keys: #frame_support::metadata::DecodeDifferent::Encode(&[ + #( #keys, )* + ]), + hashers: #frame_support::metadata::DecodeDifferent::Encode( + <#full_ident as #metadata_trait>::HASHERS, + ), + value: #frame_support::metadata::DecodeDifferent::Encode(#value), + } + ) + }, + }; - let metadata_trait = match &storage.metadata { - Metadata::Value { .. } => quote::quote_spanned!(storage.attr_span => - #frame_support::storage::types::StorageValueMetadata - ), - Metadata::Map { ..
} => quote::quote_spanned!(storage.attr_span => - #frame_support::storage::types::StorageMapMetadata + quote::quote_spanned!(storage.attr_span => + #(#cfg_attrs)* #frame_support::metadata::StorageEntryMetadata { + name: #frame_support::metadata::DecodeDifferent::Encode( + <#full_ident as #metadata_trait>::NAME ), - Metadata::DoubleMap { .. } => quote::quote_spanned!(storage.attr_span => - #frame_support::storage::types::StorageDoubleMapMetadata + modifier: <#full_ident as #metadata_trait>::MODIFIER, + ty: #ty, + default: #frame_support::metadata::DecodeDifferent::Encode( + <#full_ident as #metadata_trait>::DEFAULT ), - Metadata::NMap { .. } => quote::quote_spanned!(storage.attr_span => - #frame_support::storage::types::StorageNMapMetadata - ), - }; - - let ty = match &storage.metadata { - Metadata::Value { value } => { - let value = clean_type_string(&quote::quote!(#value).to_string()); - quote::quote_spanned!(storage.attr_span => - #frame_support::metadata::StorageEntryType::Plain( - #frame_support::metadata::DecodeDifferent::Encode(#value) - ) - ) - }, - Metadata::Map { key, value } => { - let value = clean_type_string(&quote::quote!(#value).to_string()); - let key = clean_type_string(&quote::quote!(#key).to_string()); - quote::quote_spanned!(storage.attr_span => - #frame_support::metadata::StorageEntryType::Map { - hasher: <#full_ident as #metadata_trait>::HASHER, - key: #frame_support::metadata::DecodeDifferent::Encode(#key), - value: #frame_support::metadata::DecodeDifferent::Encode(#value), - unused: false, - } - ) - }, - Metadata::DoubleMap { key1, key2, value } => { - let value = clean_type_string(&quote::quote!(#value).to_string()); - let key1 = clean_type_string(&quote::quote!(#key1).to_string()); - let key2 = clean_type_string(&quote::quote!(#key2).to_string()); - quote::quote_spanned!(storage.attr_span => - #frame_support::metadata::StorageEntryType::DoubleMap { - hasher: <#full_ident as #metadata_trait>::HASHER1, - key2_hasher: <#full_ident as #metadata_trait>::HASHER2, - key1: #frame_support::metadata::DecodeDifferent::Encode(#key1), - key2: #frame_support::metadata::DecodeDifferent::Encode(#key2), - value: #frame_support::metadata::DecodeDifferent::Encode(#value), - } - ) - }, - Metadata::NMap { keys, value, ..
} => { - let keys = keys - .iter() - .map(|key| clean_type_string(&quote::quote!(#key).to_string())) - .collect::<Vec<_>>(); - let value = clean_type_string(&quote::quote!(#value).to_string()); - quote::quote_spanned!(storage.attr_span => - #frame_support::metadata::StorageEntryType::NMap { - keys: #frame_support::metadata::DecodeDifferent::Encode(&[ - #( #keys, )* - ]), - hashers: #frame_support::metadata::DecodeDifferent::Encode( - <#full_ident as #metadata_trait>::HASHERS, - ), - value: #frame_support::metadata::DecodeDifferent::Encode(#value), - } - ) - } - }; + documentation: #frame_support::metadata::DecodeDifferent::Encode(&[ + #( #docs, )* + ]), + } + ) + }); - quote::quote_spanned!(storage.attr_span => - #(#cfg_attrs)* #frame_support::metadata::StorageEntryMetadata { - name: #frame_support::metadata::DecodeDifferent::Encode( - <#full_ident as #metadata_trait>::NAME - ), - modifier: <#full_ident as #metadata_trait>::MODIFIER, - ty: #ty, - default: #frame_support::metadata::DecodeDifferent::Encode( - <#full_ident as #metadata_trait>::DEFAULT - ), - documentation: #frame_support::metadata::DecodeDifferent::Encode(&[ - #( #docs, )* - ]), - } - ) - }); - - let getters = def.storages.iter() - .map(|storage| if let Some(getter) = &storage.getter { - let completed_where_clause = super::merge_where_clauses(&[ - &storage.where_clause, - &def.config.where_clause, - ]); - let docs = storage.docs.iter() + let getters = def.storages.iter().map(|storage| { + if let Some(getter) = &storage.getter { + let completed_where_clause = + super::merge_where_clauses(&[&storage.where_clause, &def.config.where_clause]); + let docs = storage + .docs + .iter() .map(|d| quote::quote_spanned!(storage.attr_span => #[doc = #d])); let ident = &storage.ident; @@ -365,11 +367,12 @@ pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream { } } ) - } + }, } } else { Default::default() - }); + } + }); let prefix_structs = def.storages.iter().map(|storage_def| { let type_impl_gen = &def.type_impl_generics(storage_def.attr_span); diff --git a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs b/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs index 81ed52ac87a68ec0cb11ca1f21c521228f35a08f..36cc08b732fe5a04e0640e7cae1444a2f8635752 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/store_trait.rs @@ -22,11 +22,8 @@ use syn::spanned::Spanned; /// * generate Store trait with all storages, /// * implement Store trait for Pallet.
pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { - let (trait_vis, trait_store) = if let Some(store) = &def.pallet_struct.store { - store - } else { - return Default::default() - }; + let (trait_vis, trait_store) = + if let Some(store) = &def.pallet_struct.store { store } else { return Default::default() }; let type_impl_gen = &def.type_impl_generics(trait_store.span()); let type_use_gen = &def.type_use_generics(trait_store.span()); @@ -37,7 +34,8 @@ pub fn expand_store_trait(def: &mut Def) -> proc_macro2::TokenStream { let completed_where_clause = super::merge_where_clauses(&where_clauses); let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::<Vec<_>>(); - let storage_cfg_attrs = &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::<Vec<_>>(); + let storage_cfg_attrs = + &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::<Vec<_>>(); quote::quote_spanned!(trait_store.span() => #trait_vis trait #trait_store { diff --git a/substrate/frame/support/procedural/src/pallet/expand/validate_unsigned.rs b/substrate/frame/support/procedural/src/pallet/expand/validate_unsigned.rs index 1abf7d893b933733748af89a0b56070d9d92d3e6..5f30d712e9a515cdbadd092944459a09902caa92 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/validate_unsigned.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/validate_unsigned.rs @@ -15,15 +15,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::pallet::Def; +use crate::{pallet::Def, COUNTER}; use proc_macro2::TokenStream; use quote::quote; -use crate::COUNTER; -use syn::{Ident, spanned::Spanned}; +use syn::{spanned::Spanned, Ident}; pub fn expand_validate_unsigned(def: &mut Def) -> TokenStream { let count = COUNTER.with(|counter| counter.borrow_mut().inc()); - let macro_ident = Ident::new(&format!("__is_validate_unsigned_part_defined_{}", count), def.item.span()); + let macro_ident = + Ident::new(&format!("__is_validate_unsigned_part_defined_{}", count), def.item.span()); let maybe_compile_error = if def.validate_unsigned.is_none() { quote! { @@ -48,7 +48,7 @@ pub fn expand_validate_unsigned(def: &mut Def) -> TokenStream { #maybe_compile_error } } - + #[doc(hidden)] pub use #macro_ident as is_validate_unsigned_part_defined; } diff --git a/substrate/frame/support/procedural/src/pallet/mod.rs b/substrate/frame/support/procedural/src/pallet/mod.rs index 560d57d50e03749e9736a3eeb910fb59d0b8c09b..93797906d04d9528e3dee7bc9dc74ac0eb608ee5 100644 --- a/substrate/frame/support/procedural/src/pallet/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/mod.rs @@ -25,21 +25,22 @@ //! This step will modify the ItemMod by adding some derive attributes or phantom data variants //! to user defined types. And also crate new types and implement block. -mod parse; mod expand; +mod parse; pub use parse::Def; use syn::spanned::Spanned; pub fn pallet( attr: proc_macro::TokenStream, - item: proc_macro::TokenStream + item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { if !attr.is_empty() { - let msg = "Invalid pallet macro call: expected no attributes, e.g. macro call must be just \ + let msg = + "Invalid pallet macro call: expected no attributes, e.g.
macro call must be just \ `#[frame_support::pallet]` or `#[pallet]`"; let span = proc_macro2::TokenStream::from(attr).span(); - return syn::Error::new(span, msg).to_compile_error().into(); + return syn::Error::new(span, msg).to_compile_error().into() } let item = syn::parse_macro_input!(item as syn::ItemMod); diff --git a/substrate/frame/support/procedural/src/pallet/parse/call.rs b/substrate/frame/support/procedural/src/pallet/parse/call.rs index 299b86cf6f84eb2e834fa1d9c0cbd623a89f51d6..d022e8025aab24fb544071546cf2a2b3037c05da 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/call.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/call.rs @@ -75,9 +75,7 @@ impl syn::parse::Parse for FunctionAttr { let weight_content; syn::parenthesized!(weight_content in content); - Ok(FunctionAttr { - weight: weight_content.parse::()?, - }) + Ok(FunctionAttr { weight: weight_content.parse::()? }) } } @@ -100,7 +98,6 @@ impl syn::parse::Parse for ArgAttrIsCompact { /// Check the syntax is `OriginFor` pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { - pub struct CheckDispatchableFirstArg; impl syn::parse::Parse for CheckDispatchableFirstArg { fn parse(input: syn::parse::ParseStream) -> syn::Result { @@ -113,13 +110,12 @@ pub fn check_dispatchable_first_arg_type(ty: &syn::Type) -> syn::Result<()> { } } - syn::parse2::(ty.to_token_stream()) - .map_err(|e| { - let msg = "Invalid type: expected `OriginFor`"; - let mut err = syn::Error::new(ty.span(), msg); - err.combine(e); - err - })?; + syn::parse2::(ty.to_token_stream()).map_err(|e| { + let msg = "Invalid type: expected `OriginFor`"; + let mut err = syn::Error::new(ty.span(), msg); + err.combine(e); + err + })?; Ok(()) } @@ -128,12 +124,12 @@ impl CallDef { pub fn try_from( attr_span: proc_macro2::Span, index: usize, - item: &mut syn::Item + item: &mut syn::Item, ) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let mut instances = vec![]; @@ -158,18 +154,18 @@ impl CallDef { _ => method.vis.span(), }; - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } match method.sig.inputs.first() { None => { let msg = "Invalid pallet::call, must have at least origin arg"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Receiver(_)) => { let msg = "Invalid pallet::call, first argument must be a typed argument, \ e.g. 
`origin: OriginFor`"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) }, Some(syn::FnArg::Typed(arg)) => { check_dispatchable_first_arg_type(&*arg.ty)?; @@ -181,7 +177,7 @@ impl CallDef { } else { let msg = "Invalid pallet::call, require return type \ DispatchResultWithPostInfo"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } let mut call_var_attrs: Vec = @@ -193,7 +189,7 @@ impl CallDef { } else { "Invalid pallet::call, too many weight attributes given" }; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } let weight = call_var_attrs.pop().unwrap().weight; @@ -210,14 +206,14 @@ impl CallDef { if arg_attrs.len() > 1 { let msg = "Invalid pallet::call, argument has too many attributes"; - return Err(syn::Error::new(arg.span(), msg)); + return Err(syn::Error::new(arg.span(), msg)) } let arg_ident = if let syn::Pat::Ident(pat) = &*arg.pat { pat.ident.clone() } else { let msg = "Invalid pallet::call, argument must be ident"; - return Err(syn::Error::new(arg.pat.span(), msg)); + return Err(syn::Error::new(arg.pat.span(), msg)) }; args.push((!arg_attrs.is_empty(), arg_ident, arg.ty.clone())); @@ -225,15 +221,10 @@ impl CallDef { let docs = helper::get_doc_literals(&method.attrs); - methods.push(CallVariantDef { - name: method.sig.ident.clone(), - weight, - args, - docs, - }); + methods.push(CallVariantDef { name: method.sig.ident.clone(), weight, args, docs }); } else { let msg = "Invalid pallet::call, only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)); + return Err(syn::Error::new(impl_item.span(), msg)) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/config.rs b/substrate/frame/support/procedural/src/pallet/parse/config.rs index 69dfaeb7f9e9b078e23ab68f126c57a2db64d979..b006aadf51a072cbe5f5b7440b8e6f556b1a9cd4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/config.rs @@ -17,8 +17,8 @@ use super::helper; use core::convert::TryFrom; -use syn::spanned::Spanned; use quote::ToTokens; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. 
mod keyword { @@ -66,23 +66,26 @@ impl TryFrom<&syn::TraitItemType> for ConstMetadataDef { type Error = syn::Error; fn try_from(trait_ty: &syn::TraitItemType) -> Result { - let err = |span, msg| - syn::Error::new(span, format!("Invalid usage of `#[pallet::constant]`: {}", msg)); + let err = |span, msg| { + syn::Error::new(span, format!("Invalid usage of `#[pallet::constant]`: {}", msg)) + }; let doc = helper::get_doc_literals(&trait_ty.attrs); let ident = trait_ty.ident.clone(); - let bound = trait_ty.bounds + let bound = trait_ty + .bounds .iter() - .find_map(|b| + .find_map(|b| { if let syn::TypeParamBound::Trait(tb) = b { - tb.path.segments + tb.path + .segments .last() - .and_then(|s| if s.ident == "Get" { Some(s) } else { None } ) + .and_then(|s| if s.ident == "Get" { Some(s) } else { None }) } else { None } - ) + }) .ok_or_else(|| err(trait_ty.span(), "`Get` trait bound not found"))?; - let type_arg = if let syn::PathArguments::AngleBracketed (ref ab) = bound.arguments { + let type_arg = if let syn::PathArguments::AngleBracketed(ref ab) = bound.arguments { if ab.args.len() == 1 { if let syn::GenericArgument::Type(ref ty) = ab.args[0] { Ok(ty) @@ -214,15 +217,15 @@ impl syn::parse::Parse for FromEventParse { fn check_event_type( frame_system: &syn::Ident, trait_item: &syn::TraitItem, - trait_has_instance: bool -) -> syn::Result { + trait_has_instance: bool, +) -> syn::Result { if let syn::TraitItem::Type(type_) = trait_item { if type_.ident == "Event" { // Check event has no generics if !type_.generics.params.is_empty() || type_.generics.where_clause.is_some() { let msg = "Invalid `type Event`, associated type `Event` is reserved and must have\ no generics nor where_clause"; - return Err(syn::Error::new(trait_item.span(), msg)); + return Err(syn::Error::new(trait_item.span(), msg)) } // Check bound contains IsType and From @@ -237,28 +240,28 @@ fn check_event_type( bound: `IsType<::Event>`", frame_system, ); - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) } - let from_event_bound = type_.bounds.iter().find_map(|s| { - syn::parse2::(s.to_token_stream()).ok() - }); + let from_event_bound = type_ + .bounds + .iter() + .find_map(|s| syn::parse2::(s.to_token_stream()).ok()); let from_event_bound = if let Some(b) = from_event_bound { b } else { let msg = "Invalid `type Event`, associated type `Event` is reserved and must \ bound: `From` or `From>` or `From>`"; - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) }; - if from_event_bound.is_generic - && (from_event_bound.has_instance != trait_has_instance) + if from_event_bound.is_generic && (from_event_bound.has_instance != trait_has_instance) { let msg = "Invalid `type Event`, associated type `Event` bounds inconsistent \ `From`. 
Config and generic Event must be both with instance or \ without instance"; - return Err(syn::Error::new(type_.span(), msg)); + return Err(syn::Error::new(type_.span(), msg)) } Ok(true) @@ -272,16 +275,14 @@ fn check_event_type( /// Replace ident `Self` by `T` pub fn replace_self_by_t(input: proc_macro2::TokenStream) -> proc_macro2::TokenStream { - input.into_iter() + input + .into_iter() .map(|token_tree| match token_tree { proc_macro2::TokenTree::Group(group) => - proc_macro2::Group::new( - group.delimiter(), - replace_self_by_t(group.stream()) - ).into(), + proc_macro2::Group::new(group.delimiter(), replace_self_by_t(group.stream())).into(), proc_macro2::TokenTree::Ident(ident) if ident == "Self" => proc_macro2::Ident::new("T", ident.span()).into(), - other => other + other => other, }) .collect() } @@ -297,27 +298,27 @@ impl ConfigDef { item } else { let msg = "Invalid pallet::config, expected trait definition"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::config, trait must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } syn::parse2::(item.ident.to_token_stream())?; - let where_clause = { let stream = replace_self_by_t(item.generics.where_clause.to_token_stream()); - syn::parse2::>(stream) - .expect("Internal error: replacing `Self` by `T` should result in valid where - clause") + syn::parse2::>(stream).expect( + "Internal error: replacing `Self` by `T` should result in valid where + clause", + ) }; if item.generics.params.len() > 1 { let msg = "Invalid pallet::config, expected no more than one generic"; - return Err(syn::Error::new(item.generics.params[2].span(), msg)); + return Err(syn::Error::new(item.generics.params[2].span(), msg)) } let has_instance = if item.generics.params.first().is_some() { @@ -331,15 +332,15 @@ impl ConfigDef { let mut consts_metadata = vec![]; for trait_item in &mut item.items { // Parse for event - has_event_type = has_event_type - || check_event_type(frame_system, trait_item, has_instance)?; + has_event_type = + has_event_type || check_event_type(frame_system, trait_item, has_instance)?; // Parse for constant let type_attrs_const: Vec = helper::take_item_pallet_attrs(trait_item)?; if type_attrs_const.len() > 1 { let msg = "Invalid attribute in pallet::config, only one attribute is expected"; - return Err(syn::Error::new(type_attrs_const[1].span(), msg)); + return Err(syn::Error::new(type_attrs_const[1].span(), msg)) } if type_attrs_const.len() == 1 { @@ -349,17 +350,17 @@ impl ConfigDef { consts_metadata.push(constant); }, _ => { - let msg = "Invalid pallet::constant in pallet::config, expected type trait \ + let msg = + "Invalid pallet::constant in pallet::config, expected type trait \ item"; - return Err(syn::Error::new(trait_item.span(), msg)); + return Err(syn::Error::new(trait_item.span(), msg)) }, } } } - let attr: Option = helper::take_first_item_pallet_attr( - &mut item.attrs - )?; + let attr: Option = + helper::take_first_item_pallet_attr(&mut item.attrs)?; let disable_system_supertrait_check = attr.is_some(); @@ -372,10 +373,9 @@ impl ConfigDef { let found = if item.supertraits.is_empty() { "none".to_string() } else { - let mut found = item.supertraits.iter() - .fold(String::new(), |acc, s| { - format!("{}`{}`, ", acc, quote::quote!(#s).to_string()) - }); + let mut found = item.supertraits.iter().fold(String::new(), |acc, s| { + format!("{}`{}`, ", 
acc, quote::quote!(#s).to_string()) + }); found.pop(); found.pop(); found @@ -387,19 +387,11 @@ impl ConfigDef { (try `pub trait Config: frame_system::Config {{ ...` or \ `pub trait Config: frame_system::Config {{ ...`). \ To disable this check, use `#[pallet::disable_frame_system_supertrait_check]`", - frame_system, - found, + frame_system, found, ); - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } - Ok(Self { - index, - has_instance, - consts_metadata, - has_event_type, - where_clause, - attr_span, - }) + Ok(Self { index, has_instance, consts_metadata, has_event_type, where_clause, attr_span }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/error.rs b/substrate/frame/support/procedural/src/pallet/parse/error.rs index 49aaebc87f428abf145125f429276ef8553277e9..9b96a1876917d4e2cf797424da37e98057e60173 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/error.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/error.rs @@ -16,8 +16,8 @@ // limitations under the License. use super::helper; -use syn::spanned::Spanned; use quote::ToTokens; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. mod keyword { @@ -48,11 +48,11 @@ impl ErrorDef { let item = if let syn::Item::Enum(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")); + return Err(syn::Error::new(item.span(), "Invalid pallet::error, expected item enum")) }; if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::error, `Error` must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let mut instances = vec![]; @@ -60,34 +60,30 @@ impl ErrorDef { if item.generics.where_clause.is_some() { let msg = "Invalid pallet::error, where clause is not allowed on pallet error item"; - return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)); + return Err(syn::Error::new(item.generics.where_clause.as_ref().unwrap().span(), msg)) } let error = syn::parse2::(item.ident.to_token_stream())?; - let variants = item.variants.iter() + let variants = item + .variants + .iter() .map(|variant| { if !matches!(variant.fields, syn::Fields::Unit) { let msg = "Invalid pallet::error, unexpected fields, must be `Unit`"; - return Err(syn::Error::new(variant.fields.span(), msg)); + return Err(syn::Error::new(variant.fields.span(), msg)) } if variant.discriminant.is_some() { let msg = "Invalid pallet::error, unexpected discriminant, discriminant \ are not supported"; let span = variant.discriminant.as_ref().unwrap().0.span(); - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } Ok((variant.ident.clone(), helper::get_doc_literals(&variant.attrs))) }) .collect::>()?; - Ok(ErrorDef { - attr_span, - index, - variants, - instances, - error, - }) + Ok(ErrorDef { attr_span, index, variants, instances, error }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/event.rs b/substrate/frame/support/procedural/src/pallet/parse/event.rs index e5aad2b5b5d2cfb33d9bcaeb7352034b796f6523..1bec2d775f8593268e010a0ecd7e641072c2f5b3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/event.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/event.rs @@ -16,9 +16,9 @@ // limitations under the License. 
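The parse-side hunks below lean heavily on `helper::get_doc_literals` to pull `#[doc = "..."]` literals off items; its implementation is not part of this diff. A sketch of what such a helper does under syn 1.x (names and shape assumed, not the crate's exact code):

    fn doc_literals(attrs: &[syn::Attribute]) -> Vec<syn::Lit> {
        attrs
            .iter()
            .filter_map(|attr| match attr.parse_meta() {
                // Keep the literal of every `#[doc = "..."]` attribute.
                Ok(syn::Meta::NameValue(meta)) if meta.path.is_ident("doc") => Some(meta.lit),
                _ => None,
            })
            .collect()
    }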
use super::helper; -use syn::spanned::Spanned; -use quote::ToTokens; use frame_support_procedural_tools::clean_type_string; +use quote::ToTokens; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. mod keyword { @@ -80,7 +80,7 @@ impl PalletEventAttr { /// Parse for syntax `$Type = "$SomeString"`. fn parse_event_metadata_element( - input: syn::parse::ParseStream + input: syn::parse::ParseStream, ) -> syn::Result<(syn::Type, String)> { let typ = input.parse::()?; input.parse::()?; @@ -118,7 +118,6 @@ impl syn::parse::Parse for PalletEventAttr { generate_content.parse::()?; let fn_span = generate_content.parse::()?.span(); - Ok(PalletEventAttr::DepositEvent { fn_vis, span, fn_span }) } else { Err(lookahead.error()) @@ -139,11 +138,10 @@ impl PalletEventAttrInfo { match attr { PalletEventAttr::Metadata { metadata: m, .. } if metadata.is_none() => metadata = Some(m), - PalletEventAttr::DepositEvent { fn_vis, fn_span, .. } if deposit_event.is_none() => + PalletEventAttr::DepositEvent { fn_vis, fn_span, .. } + if deposit_event.is_none() => deposit_event = Some((fn_vis, fn_span)), - attr => { - return Err(syn::Error::new(attr.span(), "Duplicate attribute")); - } + attr => return Err(syn::Error::new(attr.span(), "Duplicate attribute")), } } @@ -170,7 +168,7 @@ impl EventDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::event, `Error` must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let where_clause = item.generics.where_clause.clone(); @@ -182,10 +180,7 @@ impl EventDef { instances.push(u); } else { // construct_runtime only allow non generic event for non instantiable pallet. - instances.push(helper::InstanceUsage { - has_instance: false, - span: item.ident.span(), - }) + instances.push(helper::InstanceUsage { has_instance: false, span: item.ident.span() }) } let has_instance = item.generics.type_params().any(|t| t.ident == "I"); @@ -195,13 +190,19 @@ impl EventDef { let event = syn::parse2::(item.ident.to_token_stream())?; - let metadata = item.variants.iter() + let metadata = item + .variants + .iter() .map(|variant| { let name = variant.ident.clone(); let docs = helper::get_doc_literals(&variant.attrs); - let args = variant.fields.iter() + let args = variant + .fields + .iter() .map(|field| { - metadata.iter().find(|m| m.0 == field.ty) + metadata + .iter() + .find(|m| m.0 == field.ty) .map(|m| m.1.clone()) .unwrap_or_else(|| { clean_type_string(&field.ty.to_token_stream().to_string()) diff --git a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs index 430bf94783774f7d4e831c0e3e442793879b0d2d..71208f3329a107872e5e6189fb107f1c378fb5d9 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -52,14 +52,11 @@ pub struct ExtraConstantDef { } impl ExtraConstantsDef { - pub fn try_from( - index: usize, - item: &mut syn::Item - ) -> syn::Result { + pub fn try_from(index: usize, item: &mut syn::Item) -> syn::Result { let item = if let syn::Item::Impl(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")); + return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) }; let mut instances = vec![]; @@ -78,28 +75,28 @@ impl ExtraConstantsDef { method } else { let msg = "Invalid pallet::call, 
only method accepted"; - return Err(syn::Error::new(impl_item.span(), msg)); + return Err(syn::Error::new(impl_item.span(), msg)) }; if !method.sig.inputs.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 args"; - return Err(syn::Error::new(method.sig.span(), msg)); + return Err(syn::Error::new(method.sig.span(), msg)) } if !method.sig.generics.params.is_empty() { let msg = "Invalid pallet::extra_constants, method must have 0 generics"; - return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)); + return Err(syn::Error::new(method.sig.generics.params[0].span(), msg)) } if method.sig.generics.where_clause.is_some() { let msg = "Invalid pallet::extra_constants, method must have no where clause"; - return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)); + return Err(syn::Error::new(method.sig.generics.where_clause.span(), msg)) } let type_ = match &method.sig.output { syn::ReturnType::Default => { let msg = "Invalid pallet::extra_constants, method must have a return type"; - return Err(syn::Error::new(method.span(), msg)); + return Err(syn::Error::new(method.span(), msg)) }, syn::ReturnType::Type(_, type_) => *type_.clone(), }; diff --git a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs index 1438c400b17f12c080ea648a1d1ea457f2bd37ab..82e297b4e26e8eed66d3fa659f8a1f16f375fe81 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/genesis_build.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// Definition for pallet genesis build implementation. pub struct GenesisBuildDef { @@ -40,24 +40,22 @@ impl GenesisBuildDef { item } else { let msg = "Invalid pallet::genesis_build, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; - let item_trait = &item.trait_.as_ref() + let item_trait = &item + .trait_ + .as_ref() .ok_or_else(|| { let msg = "Invalid pallet::genesis_build, expected impl<..> GenesisBuild<..> \ for GenesisConfig<..>"; syn::Error::new(item.span(), msg) - })?.1; + })? + .1; let mut instances = vec![]; instances.push(helper::check_genesis_builder_usage(&item_trait)?); - Ok(Self { - attr_span, - index, - instances, - where_clause: item.generics.where_clause.clone(), - }) + Ok(Self { attr_span, index, instances, where_clause: item.generics.where_clause.clone() }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/genesis_config.rs b/substrate/frame/support/procedural/src/pallet/parse/genesis_config.rs index 729d1241390a506dd950ddd3c5f72523123255ff..a0cf7de1a846bcf42028cb1ac6a9196aaccd63b4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/genesis_config.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/genesis_config.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// Definition for pallet genesis config type. 
/// @@ -42,7 +42,7 @@ impl GenesisConfigDef { syn::Item::Struct(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::genesis_config, expected enum or struct"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }, }; @@ -60,19 +60,14 @@ impl GenesisConfigDef { if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::genesis_config, GenesisConfig must be public"; - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } if ident != "GenesisConfig" { let msg = "Invalid pallet::genesis_config, ident must `GenesisConfig`"; - return Err(syn::Error::new(ident.span(), msg)); + return Err(syn::Error::new(ident.span(), msg)) } - Ok(GenesisConfigDef { - index, - genesis_config: ident.clone(), - instances, - gen_kind, - }) + Ok(GenesisConfigDef { index, genesis_config: ident.clone(), instances, gen_kind }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/helper.rs b/substrate/frame/support/procedural/src/pallet/parse/helper.rs index 3a7729c47e1d743a1fbd9c717af2211fe29c5bea..211f1ed5ee428d8bc3905619879900a84e46212b 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/helper.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/helper.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use quote::ToTokens; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. mod keyword { @@ -47,20 +47,15 @@ pub trait MutItemAttrs { } /// Take the first pallet attribute (e.g. attribute like `#[pallet..]`) and decode it to `Attr` -pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::Result> where +pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::Result> +where Attr: syn::parse::Parse, { - let attrs = if let Some(attrs) = item.mut_item_attrs() { - attrs - } else { - return Ok(None) - }; - - if let Some(index) = attrs.iter() - .position(|attr| - attr.path.segments.first().map_or(false, |segment| segment.ident == "pallet") - ) - { + let attrs = if let Some(attrs) = item.mut_item_attrs() { attrs } else { return Ok(None) }; + + if let Some(index) = attrs.iter().position(|attr| { + attr.path.segments.first().map_or(false, |segment| segment.ident == "pallet") + }) { let pallet_attr = attrs.remove(index); Ok(Some(syn::parse2(pallet_attr.into_token_stream())?)) } else { @@ -69,7 +64,8 @@ pub fn take_first_item_pallet_attr(item: &mut impl MutItemAttrs) -> syn::R } /// Take all the pallet attributes (e.g. attribute like `#[pallet..]`) and decode them to `Attr` -pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> where +pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result> +where Attr: syn::parse::Parse, { let mut pallet_attrs = Vec::new(); @@ -83,13 +79,16 @@ pub fn take_item_pallet_attrs(item: &mut impl MutItemAttrs) -> syn::Result /// Get all the cfg attributes (e.g. 
attribute like `#[cfg..]`) and decode them to `Attr` pub fn get_item_cfg_attrs(attrs: &[syn::Attribute]) -> Vec { - attrs.iter().filter_map(|attr| { - if attr.path.segments.first().map_or(false, |segment| segment.ident == "cfg") { - Some(attr.clone()) - } else { - None - } - }).collect::>() + attrs + .iter() + .filter_map(|attr| { + if attr.path.segments.first().map_or(false, |segment| segment.ident == "cfg") { + Some(attr.clone()) + } else { + None + } + }) + .collect::>() } impl MutItemAttrs for syn::Item { @@ -116,7 +115,6 @@ impl MutItemAttrs for syn::Item { } } - impl MutItemAttrs for syn::TraitItem { fn mut_item_attrs(&mut self) -> Option<&mut Vec> { match self { @@ -143,7 +141,8 @@ impl MutItemAttrs for syn::ItemMod { /// Return all doc attributes literals found. pub fn get_doc_literals(attrs: &Vec) -> Vec { - attrs.iter() + attrs + .iter() .filter_map(|attr| { if let Ok(syn::Meta::NameValue(meta)) = attr.parse_meta() { if meta.path.get_ident().map_or(false, |ident| ident == "doc") { @@ -166,7 +165,7 @@ impl syn::parse::Parse for Unit { syn::parenthesized!(content in input); if !content.is_empty() { let msg = "unexpected tokens, expected nothing inside parenthesis as `()`"; - return Err(syn::Error::new(content.span(), msg)); + return Err(syn::Error::new(content.span(), msg)) } Ok(Self) } @@ -179,7 +178,7 @@ impl syn::parse::Parse for StaticLifetime { let lifetime = input.parse::()?; if lifetime.ident != "static" { let msg = "unexpected tokens, expected `static`"; - return Err(syn::Error::new(lifetime.ident.span(), msg)); + return Err(syn::Error::new(lifetime.ident.span(), msg)) } Ok(Self) } @@ -190,10 +189,7 @@ impl syn::parse::Parse for StaticLifetime { /// `span` is used in case generics is empty (empty generics has span == call_site). /// /// return the instance if found. -pub fn check_config_def_gen( - gen: &syn::Generics, - span: proc_macro2::Span, -) -> syn::Result<()> { +pub fn check_config_def_gen(gen: &syn::Generics, span: proc_macro2::Span) -> syn::Result<()> { let expected = "expected `I: 'static = ()`"; pub struct CheckTraitDefGenerics; impl syn::parse::Parse for CheckTraitDefGenerics { @@ -208,13 +204,12 @@ pub fn check_config_def_gen( } } - syn::parse2::(gen.params.to_token_stream()) - .map_err(|e| { - let msg = format!("Invalid generics: {}", expected); - let mut err = syn::Error::new(span, msg); - err.combine(e); - err - })?; + syn::parse2::(gen.params.to_token_stream()).map_err(|e| { + let msg = format!("Invalid generics: {}", expected); + let mut err = syn::Error::new(span, msg); + err.combine(e); + err + })?; Ok(()) } @@ -234,10 +229,7 @@ pub fn check_type_def_gen_no_bounds( pub struct Checker(InstanceUsage); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { - let mut instance_usage = InstanceUsage { - has_instance: false, - span: input.span(), - }; + let mut instance_usage = InstanceUsage { has_instance: false, span: input.span() }; input.parse::()?; if input.peek(syn::Token![,]) { @@ -258,7 +250,8 @@ pub fn check_type_def_gen_no_bounds( let mut err = syn::Error::new(span, msg); err.combine(e); err - })?.0; + })? 
+ .0; Ok(i) } @@ -286,10 +279,7 @@ pub fn check_type_def_optional_gen( return Ok(Self(None)) } - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; input.parse::()?; @@ -338,9 +328,13 @@ pub fn check_type_def_optional_gen( let mut err = syn::Error::new(span, msg); err.combine(e); err - })?.0 + })? + .0 // Span can be call_site if generic is empty. Thus we replace it. - .map(|mut i| { i.span = span; i }); + .map(|mut i| { + i.span = span; + i + }); Ok(i) } @@ -355,10 +349,7 @@ pub fn check_pallet_struct_usage(type_: &Box) -> syn::Result syn::Result { - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; input.parse::()?; input.parse::()?; @@ -380,7 +371,8 @@ pub fn check_pallet_struct_usage(type_: &Box) -> syn::Result) -> syn::Result syn::Result { +pub fn check_impl_gen(gen: &syn::Generics, span: proc_macro2::Span) -> syn::Result { let expected = "expected `impl` or `impl, I: 'static>`"; pub struct Checker(InstanceUsage); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; input.parse::()?; input.parse::()?; @@ -428,7 +414,8 @@ pub fn check_impl_gen( let mut err = syn::Error::new(span, format!("Invalid generics: {}", expected)); err.combine(e); err - })?.0; + })? + .0; Ok(i) } @@ -451,10 +438,7 @@ pub fn check_type_def_gen( pub struct Checker(InstanceUsage); impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; input.parse::()?; @@ -503,7 +487,8 @@ pub fn check_type_def_gen( let mut err = syn::Error::new(span, msg); err.combine(e); err - })?.0; + })? + .0; // Span can be call_site if generic is empty. Thus we replace it. i.span = span; @@ -521,10 +506,7 @@ pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result syn::Result { - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; input.parse::()?; input.parse::()?; @@ -546,7 +528,8 @@ pub fn check_genesis_builder_usage(type_: &syn::Path) -> syn::Result()?; input.parse::()?; - let mut instance_usage = InstanceUsage { - span: input.span(), - has_instance: false, - }; + let mut instance_usage = InstanceUsage { span: input.span(), has_instance: false }; if input.is_empty() { return Ok(Self(Some(instance_usage))) @@ -603,17 +583,19 @@ pub fn check_type_value_gen( let mut err = syn::Error::new(span, msg); err.combine(e); err - })?.0 + })? + .0 // Span can be call_site if generic is empty. Thus we replace it. - .map(|mut i| { i.span = span; i }); + .map(|mut i| { + i.span = span; + i + }); Ok(i) } /// Check the keyword `DispatchResultWithPostInfo` or `DispatchResult`. 
-pub fn check_pallet_call_return_type( - type_: &syn::Type, -) -> syn::Result<()> { +pub fn check_pallet_call_return_type(type_: &syn::Type) -> syn::Result<()> { pub struct Checker; impl syn::parse::Parse for Checker { fn parse(input: syn::parse::ParseStream) -> syn::Result { diff --git a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs index 99ae3ed625414dd0342726e71d1a37f2a52a3dc3..1dd86498f22d56cf8c6392f29633ddd2eb229233 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/hooks.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/hooks.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// Implementation of the pallet hooks. pub struct HooksDef { @@ -42,30 +42,31 @@ impl HooksDef { item } else { let msg = "Invalid pallet::hooks, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let mut instances = vec![]; instances.push(helper::check_pallet_struct_usage(&item.self_ty)?); instances.push(helper::check_impl_gen(&item.generics, item.impl_token.span())?); - let item_trait = &item.trait_.as_ref() + let item_trait = &item + .trait_ + .as_ref() .ok_or_else(|| { let msg = "Invalid pallet::hooks, expected impl<..> Hooks \ for Pallet<..>"; syn::Error::new(item.span(), msg) - })?.1; + })? + .1; - if item_trait.segments.len() != 1 - || item_trait.segments[0].ident != "Hooks" - { + if item_trait.segments.len() != 1 || item_trait.segments[0].ident != "Hooks" { let msg = format!( "Invalid pallet::hooks, expected trait to be `Hooks` found `{}`\ , you can import from `frame_support::pallet_prelude`", quote::quote!(#item_trait) ); - return Err(syn::Error::new(item_trait.span(), msg)); + return Err(syn::Error::new(item_trait.span(), msg)) } let has_runtime_upgrade = item.items.iter().any(|i| match i { diff --git a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs index a3f12b1574981345d3132a7cfb9933f2e7fe4b9d..de5ad8f795db5056617a3b9eefd4b01b8151f2f4 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/inherent.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/inherent.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// The definition of the pallet inherent implementation. 
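// Reviewer note (not part of the patch): the various `Checker` types in
// `helper.rs` above all follow one pattern: implement `syn::parse::Parse` on a
// throwaway type, then run it over the generics' token stream with
// `syn::parse2`, which also enforces that no tokens are left over. A minimal
// sketch accepting exactly `I: 'static`, assuming `syn = "1"` and
// `proc-macro2 = "1"`:
struct ExpectsInstanceGenerics;

impl syn::parse::Parse for ExpectsInstanceGenerics {
    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
        let ident = input.parse::<syn::Ident>()?;
        if ident != "I" {
            return Err(syn::Error::new(ident.span(), "expected `I`"))
        }
        input.parse::<syn::Token![:]>()?;
        let lifetime = input.parse::<syn::Lifetime>()?;
        if lifetime.ident != "static" {
            return Err(syn::Error::new(lifetime.ident.span(), "expected `'static`"))
        }
        Ok(Self)
    }
}

fn check_instance_generics(gen: proc_macro2::TokenStream) -> syn::Result<()> {
    syn::parse2::<ExpectsInstanceGenerics>(gen).map(|_| ())
}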
pub struct InherentDef { @@ -32,22 +32,22 @@ impl InherentDef { item } else { let msg = "Invalid pallet::inherent, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if item.trait_.is_none() { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ProvideInherent" { let msg = "Invalid pallet::inherent, expected trait ProvideInherent"; - return Err(syn::Error::new(last.span(), msg)); + return Err(syn::Error::new(last.span(), msg)) } } else { let msg = "Invalid pallet::inherent, expected impl<..> ProvideInherent for Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let mut instances = vec![]; diff --git a/substrate/frame/support/procedural/src/pallet/parse/mod.rs b/substrate/frame/support/procedural/src/pallet/parse/mod.rs index 2f378c52e8b33472462a4296e264e539f23effe0..c7367e582044bb830ea71850535e7e2c8e976e52 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/mod.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/mod.rs @@ -19,24 +19,24 @@ //! //! Parse the module into `Def` struct through `Def::try_from` function. -pub mod config; -pub mod pallet_struct; -pub mod hooks; pub mod call; +pub mod config; pub mod error; -pub mod origin; -pub mod inherent; -pub mod storage; pub mod event; -pub mod helper; -pub mod genesis_config; +pub mod extra_constants; pub mod genesis_build; -pub mod validate_unsigned; +pub mod genesis_config; +pub mod helper; +pub mod hooks; +pub mod inherent; +pub mod origin; +pub mod pallet_struct; +pub mod storage; pub mod type_value; -pub mod extra_constants; +pub mod validate_unsigned; -use syn::spanned::Spanned; use frame_support_procedural_tools::generate_crate_access_2018; +use syn::spanned::Spanned; /// Parsed definition of a pallet. pub struct Def { @@ -67,11 +67,14 @@ impl Def { let frame_support = generate_crate_access_2018("frame-support")?; let item_span = item.span(); - let items = &mut item.content.as_mut() + let items = &mut item + .content + .as_mut() .ok_or_else(|| { let msg = "Invalid pallet definition, expected mod to be inlined."; syn::Error::new(item_span, msg) - })?.1; + })? + .1; let mut config = None; let mut pallet_struct = None; @@ -128,13 +131,12 @@ impl Def { }, Some(PalletAttr::TypeValue(span)) => type_values.push(type_value::TypeValueDef::try_from(span, index, item)?), - Some(PalletAttr::ExtraConstants(_)) => { + Some(PalletAttr::ExtraConstants(_)) => extra_constants = - Some(extra_constants::ExtraConstantsDef::try_from(index, item)?) 
- }, + Some(extra_constants::ExtraConstantsDef::try_from(index, item)?), Some(attr) => { let msg = "Invalid duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)); + return Err(syn::Error::new(attr.span(), msg)) }, None => (), } @@ -148,12 +150,13 @@ impl Def { genesis_config.as_ref().map_or("unused", |_| "used"), genesis_build.as_ref().map_or("unused", |_| "used"), ); - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } let def = Def { item, - config: config.ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::config]`"))?, + config: config + .ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::config]`"))?, pallet_struct: pallet_struct .ok_or_else(|| syn::Error::new(item_span, "Missing `#[pallet::pallet]`"))?, hooks, @@ -181,10 +184,7 @@ impl Def { /// Check that usage of trait `Event` is consistent with the definition, i.e. it is declared /// and trait defines type Event, or not declared and no trait associated type. fn check_event_usage(&self) -> syn::Result<()> { - match ( - self.config.has_event_type, - self.event.is_some(), - ) { + match (self.config.has_event_type, self.event.is_some()) { (true, false) => { let msg = "Invalid usage of Event, `Config` contains associated type `Event`, \ but enum `Event` is not declared (i.e. no use of `#[pallet::event]`). \ @@ -197,7 +197,7 @@ impl Def { An Event associated type must be declare on trait `Config`."; Err(syn::Error::new(proc_macro2::Span::call_site(), msg)) }, - _ => Ok(()) + _ => Ok(()), } } @@ -235,19 +235,18 @@ impl Def { instances.extend_from_slice(&extra_constants.instances[..]); } - let mut errors = instances.into_iter() - .filter_map(|instances| { - if instances.has_instance == self.config.has_instance { - return None - } - let msg = if self.config.has_instance { - "Invalid generic declaration, trait is defined with instance but generic use none" - } else { - "Invalid generic declaration, trait is defined without instance but generic use \ + let mut errors = instances.into_iter().filter_map(|instances| { + if instances.has_instance == self.config.has_instance { + return None + } + let msg = if self.config.has_instance { + "Invalid generic declaration, trait is defined with instance but generic use none" + } else { + "Invalid generic declaration, trait is defined without instance but generic use \ some" - }; - Some(syn::Error::new(instances.span, msg)) - }); + }; + Some(syn::Error::new(instances.span, msg)) + }); if let Some(mut first_error) = errors.next() { for error in errors { @@ -351,7 +350,8 @@ impl GenericKind { match self { GenericKind::None => quote::quote!(), GenericKind::Config => quote::quote_spanned!(span => T: Config), - GenericKind::ConfigAndInstance => quote::quote_spanned!(span => T: Config, I: 'static), + GenericKind::ConfigAndInstance => + quote::quote_spanned!(span => T: Config, I: 'static), } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/origin.rs b/substrate/frame/support/procedural/src/pallet/parse/origin.rs index 2b47978b808a8085e03e93cd281c9c4cd6bb7b5b..c4e1197ac511cd9989b790930dcf1da7c6a48dcb 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/origin.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/origin.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// Definition of the pallet origin type. 
/// @@ -42,7 +42,7 @@ impl OriginDef { syn::Item::Type(item) => (&item.vis, &item.ident, &item.generics), _ => { let msg = "Invalid pallet::origin, expected enum or struct or type"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }, }; @@ -54,27 +54,19 @@ impl OriginDef { instances.push(u); } else { // construct_runtime only allow generic event for instantiable pallet. - instances.push(helper::InstanceUsage { - has_instance: false, - span: ident.span(), - }) + instances.push(helper::InstanceUsage { has_instance: false, span: ident.span() }) } if !matches!(vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::origin, Origin must be public"; - return Err(syn::Error::new(item_span, msg)); + return Err(syn::Error::new(item_span, msg)) } if ident != "Origin" { let msg = "Invalid pallet::origin, ident must `Origin`"; - return Err(syn::Error::new(ident.span(), msg)); + return Err(syn::Error::new(ident.span(), msg)) } - Ok(OriginDef { - index, - has_instance, - is_generic, - instances, - }) + Ok(OriginDef { index, has_instance, is_generic, instances }) } } diff --git a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs index ba85da2d9e6843380668466097229a262318be23..088b647fad7de39f7fbc1a8af913c7f629622eab 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -16,8 +16,8 @@ // limitations under the License. use super::helper; -use syn::spanned::Spanned; use quote::ToTokens; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. mod keyword { @@ -49,11 +49,7 @@ pub struct PalletStructDef { /// * `#[pallet::generate_store($vis trait Store)]` /// * `#[pallet::generate_storage_info]` pub enum PalletStructAttr { - GenerateStore { - span: proc_macro2::Span, - vis: syn::Visibility, - keyword: keyword::Store, - }, + GenerateStore { span: proc_macro2::Span, vis: syn::Visibility, keyword: keyword::Store }, GenerateStorageInfoTrait(proc_macro2::Span), } @@ -103,7 +99,7 @@ impl PalletStructDef { item } else { let msg = "Invalid pallet::pallet, expected struct definition"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; let mut store = None; @@ -115,12 +111,14 @@ impl PalletStructDef { PalletStructAttr::GenerateStore { vis, keyword, .. 
} if store.is_none() => { store = Some((vis, keyword)); }, - PalletStructAttr::GenerateStorageInfoTrait(span) if generate_storage_info.is_none() => { + PalletStructAttr::GenerateStorageInfoTrait(span) + if generate_storage_info.is_none() => + { generate_storage_info = Some(span); - }, + } attr => { let msg = "Unexpected duplicated attribute"; - return Err(syn::Error::new(attr.span(), msg)); + return Err(syn::Error::new(attr.span(), msg)) }, } } @@ -129,12 +127,12 @@ impl PalletStructDef { if !matches!(item.vis, syn::Visibility::Public(_)) { let msg = "Invalid pallet::pallet, Pallet must be public"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if item.generics.where_clause.is_some() { let msg = "Invalid pallet::pallet, where clause not supported on Pallet declaration"; - return Err(syn::Error::new(item.generics.where_clause.span(), msg)); + return Err(syn::Error::new(item.generics.where_clause.span(), msg)) } let mut instances = vec![]; diff --git a/substrate/frame/support/procedural/src/pallet/parse/storage.rs b/substrate/frame/support/procedural/src/pallet/parse/storage.rs index 9ec890e66e57a6ebbc0fd1159959c8c97749ec9c..7927aa2455fe3ecdc9f40f57858c7dba6ed51f7a 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/storage.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/storage.rs @@ -16,9 +16,9 @@ // limitations under the License. use super::helper; -use syn::spanned::Spanned; use quote::ToTokens; use std::collections::HashMap; +use syn::spanned::Spanned; /// List of additional token to be used for parsing. mod keyword { @@ -69,11 +69,10 @@ impl syn::parse::Parse for PalletStorageAttr { let renamed_prefix = content.parse::()?; // Ensure the renamed prefix is a proper Rust identifier - syn::parse_str::(&renamed_prefix.value()) - .map_err(|_| { - let msg = format!("`{}` is not a valid identifier", renamed_prefix.value()); - syn::Error::new(renamed_prefix.span(), msg) - })?; + syn::parse_str::(&renamed_prefix.value()).map_err(|_| { + let msg = format!("`{}` is not a valid identifier", renamed_prefix.value()); + syn::Error::new(renamed_prefix.span(), msg) + })?; Ok(Self::StorageName(renamed_prefix, attr_span)) } else { @@ -86,16 +85,8 @@ impl syn::parse::Parse for PalletStorageAttr { pub enum Metadata { Value { value: syn::Type }, Map { value: syn::Type, key: syn::Type }, - DoubleMap { - value: syn::Type, - key1: syn::Type, - key2: syn::Type - }, - NMap { - keys: Vec, - keygen: syn::Type, - value: syn::Type, - }, + DoubleMap { value: syn::Type, key1: syn::Type, key2: syn::Type }, + NMap { keys: Vec, keygen: syn::Type, value: syn::Type }, } pub enum QueryKind { @@ -181,11 +172,8 @@ impl StorageGenerics { Self::DoubleMap { value, key1, key2, .. } => Metadata::DoubleMap { value, key1, key2 }, Self::Map { value, key, .. } => Metadata::Map { value, key }, Self::Value { value, .. } => Metadata::Value { value }, - Self::NMap { keygen, value, .. } => Metadata::NMap { - keys: collect_keys(&keygen)?, - keygen, - value, - }, + Self::NMap { keygen, value, .. } => + Metadata::NMap { keys: collect_keys(&keygen)?, keygen, value }, }; Ok(res) @@ -194,11 +182,10 @@ impl StorageGenerics { /// Return the query kind from the defined generics fn query_kind(&self) -> Option { match &self { - Self::DoubleMap { query_kind, .. } - | Self::Map { query_kind, .. } - | Self::Value { query_kind, .. } - | Self::NMap { query_kind, .. } - => query_kind.clone(), + Self::DoubleMap { query_kind, .. } | + Self::Map { query_kind, .. 
} | + Self::Value { query_kind, .. } | + Self::NMap { query_kind, .. } => query_kind.clone(), } } } @@ -225,7 +212,10 @@ fn check_generics( let mut e = format!( "`{}` expect generics {}and optional generics {}", storage_type_name, - mandatory_generics.iter().map(|name| format!("`{}`, ", name)).collect::(), + mandatory_generics + .iter() + .map(|name| format!("`{}`, ", name)) + .collect::(), &optional_generics.iter().map(|name| format!("`{}`, ", name)).collect::(), ); e.pop(); @@ -235,14 +225,12 @@ fn check_generics( }; for (gen_name, gen_binding) in map { - if !mandatory_generics.contains(&gen_name.as_str()) - && !optional_generics.contains(&gen_name.as_str()) + if !mandatory_generics.contains(&gen_name.as_str()) && + !optional_generics.contains(&gen_name.as_str()) { let msg = format!( "Invalid pallet::storage, Unexpected generic `{}` for `{}`. {}", - gen_name, - storage_type_name, - expectation, + gen_name, storage_type_name, expectation, ); errors.push(syn::Error::new(gen_binding.span(), msg)); } @@ -252,8 +240,7 @@ fn check_generics( if !map.contains_key(&mandatory_generic.to_string()) { let msg = format!( "Invalid pallet::storage, cannot find `{}` generic, required for `{}`.", - mandatory_generic, - storage_type_name + mandatory_generic, storage_type_name ); errors.push(syn::Error::new(args_span, msg)); } @@ -284,7 +271,7 @@ fn process_named_generics( let msg = "Invalid pallet::storage, Duplicated named generic"; let mut err = syn::Error::new(arg.ident.span(), msg); err.combine(syn::Error::new(other.ident.span(), msg)); - return Err(err); + return Err(err) } parsed.insert(arg.ident.to_string(), arg.clone()); } @@ -300,15 +287,14 @@ fn process_named_generics( )?; StorageGenerics::Value { - value: parsed.remove("Value") + value: parsed + .remove("Value") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - query_kind: parsed.remove("QueryKind") - .map(|binding| binding.ty), - on_empty: parsed.remove("OnEmpty") - .map(|binding| binding.ty), + query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), + on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), } - } + }, StorageKind::Map => { check_generics( &parsed, @@ -319,20 +305,23 @@ fn process_named_generics( )?; StorageGenerics::Map { - hasher: parsed.remove("Hasher") + hasher: parsed + .remove("Hasher") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - key: parsed.remove("Key") + key: parsed + .remove("Key") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - value: parsed.remove("Value") + value: parsed + .remove("Value") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } - } + }, StorageKind::DoubleMap => { check_generics( &parsed, @@ -343,26 +332,31 @@ fn process_named_generics( )?; StorageGenerics::DoubleMap { - hasher1: parsed.remove("Hasher1") + hasher1: parsed + .remove("Hasher1") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - key1: parsed.remove("Key1") + key1: parsed + .remove("Key1") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - hasher2: parsed.remove("Hasher2") + hasher2: parsed + .remove("Hasher2") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - key2: parsed.remove("Key2") + key2: parsed + .remove("Key2") .map(|binding| binding.ty) 
.expect("checked above as mandatory generic"), - value: parsed.remove("Value") + value: parsed + .remove("Value") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } - } + }, StorageKind::NMap => { check_generics( &parsed, @@ -373,17 +367,19 @@ fn process_named_generics( )?; StorageGenerics::NMap { - keygen: parsed.remove("Key") + keygen: parsed + .remove("Key") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), - value: parsed.remove("Value") + value: parsed + .remove("Value") .map(|binding| binding.ty) .expect("checked above as mandatory generic"), query_kind: parsed.remove("QueryKind").map(|binding| binding.ty), on_empty: parsed.remove("OnEmpty").map(|binding| binding.ty), max_values: parsed.remove("MaxValues").map(|binding| binding.ty), } - } + }, }; let metadata = generics.metadata()?; @@ -399,41 +395,32 @@ fn process_unnamed_generics( args: &[syn::Type], ) -> syn::Result<(Option, Metadata, Option)> { let retrieve_arg = |arg_pos| { - args.get(arg_pos) - .cloned() - .ok_or_else(|| { - let msg = format!( - "Invalid pallet::storage, unexpected number of generic argument, \ + args.get(arg_pos).cloned().ok_or_else(|| { + let msg = format!( + "Invalid pallet::storage, unexpected number of generic argument, \ expect at least {} args, found {}.", - arg_pos + 1, - args.len(), - ); - syn::Error::new(args_span, msg) - }) + arg_pos + 1, + args.len(), + ); + syn::Error::new(args_span, msg) + }) }; let prefix_arg = retrieve_arg(0)?; - syn::parse2::(prefix_arg.to_token_stream()) - .map_err(|e| { - let msg = "Invalid pallet::storage, for unnamed generic arguments the type \ + syn::parse2::(prefix_arg.to_token_stream()).map_err(|e| { + let msg = "Invalid pallet::storage, for unnamed generic arguments the type \ first generic argument must be `_`, the argument is then replaced by macro."; - let mut err = syn::Error::new(prefix_arg.span(), msg); - err.combine(e); - err - })?; + let mut err = syn::Error::new(prefix_arg.span(), msg); + err.combine(e); + err + })?; let res = match storage { - StorageKind::Value => ( - None, - Metadata::Value { value: retrieve_arg(1)? }, - retrieve_arg(2).ok(), - ), + StorageKind::Value => + (None, Metadata::Value { value: retrieve_arg(1)? }, retrieve_arg(2).ok()), StorageKind::Map => ( None, - Metadata::Map { - key: retrieve_arg(2)?, - value: retrieve_arg(3)?, - }, + Metadata::Map { key: retrieve_arg(2)?, value: retrieve_arg(3)? }, retrieve_arg(4).ok(), ), StorageKind::DoubleMap => ( @@ -448,15 +435,7 @@ fn process_unnamed_generics( StorageKind::NMap => { let keygen = retrieve_arg(1)?; let keys = collect_keys(&keygen)?; - ( - None, - Metadata::NMap { - keys, - keygen, - value: retrieve_arg(2)?, - }, - retrieve_arg(3).ok(), - ) + (None, Metadata::NMap { keys, keygen, value: retrieve_arg(2)? 
}, retrieve_arg(3).ok()) }, }; @@ -479,8 +458,8 @@ fn process_generics( found `{}`.", found, ); - return Err(syn::Error::new(segment.ident.span(), msg)); - } + return Err(syn::Error::new(segment.ident.span(), msg)) + }, }; let args_span = segment.arguments.span(); @@ -490,12 +469,14 @@ fn process_generics( _ => { let msg = "Invalid pallet::storage, invalid number of generic generic arguments, \ expect more that 0 generic arguments."; - return Err(syn::Error::new(segment.span(), msg)); - } + return Err(syn::Error::new(segment.span(), msg)) + }, }; if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::Type(_))) { - let args = args.args.iter() + let args = args + .args + .iter() .map(|gen| match gen { syn::GenericArgument::Type(gen) => gen.clone(), _ => unreachable!("It is asserted above that all generics are types"), @@ -503,7 +484,9 @@ fn process_generics( .collect::>(); process_unnamed_generics(&storage_kind, args_span, &args) } else if args.args.iter().all(|gen| matches!(gen, syn::GenericArgument::Binding(_))) { - let args = args.args.iter() + let args = args + .args + .iter() .map(|gen| match gen { syn::GenericArgument::Binding(gen) => gen.clone(), _ => unreachable!("It is asserted above that all generics are bindings"), @@ -521,11 +504,7 @@ fn process_generics( /// Parse the 2nd type argument to `StorageNMap` and return its keys. fn collect_keys(keygen: &syn::Type) -> syn::Result> { if let syn::Type::Tuple(tup) = keygen { - tup - .elems - .iter() - .map(extract_key) - .collect::>>() + tup.elems.iter().map(extract_key).collect::>>() } else { Ok(vec![extract_key(keygen)?]) } @@ -537,7 +516,7 @@ fn extract_key(ty: &syn::Type) -> syn::Result { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(ty.span(), msg)); + return Err(syn::Error::new(ty.span(), msg)) }; let key_struct = typ.path.segments.last().ok_or_else(|| { @@ -546,28 +525,31 @@ fn extract_key(ty: &syn::Type) -> syn::Result { })?; if key_struct.ident != "Key" && key_struct.ident != "NMapKey" { let msg = "Invalid pallet::storage, expected Key or NMapKey struct"; - return Err(syn::Error::new(key_struct.ident.span(), msg)); + return Err(syn::Error::new(key_struct.ident.span(), msg)) } let ty_params = if let syn::PathArguments::AngleBracketed(args) = &key_struct.arguments { args } else { let msg = "Invalid pallet::storage, expected angle bracketed arguments"; - return Err(syn::Error::new(key_struct.arguments.span(), msg)); + return Err(syn::Error::new(key_struct.arguments.span(), msg)) }; if ty_params.args.len() != 2 { - let msg = format!("Invalid pallet::storage, unexpected number of generic arguments \ - for Key struct, expected 2 args, found {}", ty_params.args.len()); - return Err(syn::Error::new(ty_params.span(), msg)); + let msg = format!( + "Invalid pallet::storage, unexpected number of generic arguments \ + for Key struct, expected 2 args, found {}", + ty_params.args.len() + ); + return Err(syn::Error::new(ty_params.span(), msg)) } let key = match &ty_params.args[1] { syn::GenericArgument::Type(key_ty) => key_ty.clone(), _ => { let msg = "Invalid pallet::storage, expected type"; - return Err(syn::Error::new(ty_params.args[1].span(), msg)); - } + return Err(syn::Error::new(ty_params.args[1].span(), msg)) + }, }; Ok(key) @@ -576,8 +558,7 @@ fn extract_key(ty: &syn::Type) -> syn::Result { impl StorageDef { /// Return the storage prefix for this storage item pub fn prefix(&self) -> String { - self - .rename_as + self.rename_as .as_ref() .map(syn::LitStr::value) 
.unwrap_or(self.ident.to_string()) @@ -586,11 +567,7 @@ impl StorageDef { /// Return either the span of the ident or the span of the literal in the /// #[storage_prefix] attribute pub fn prefix_span(&self) -> proc_macro2::Span { - self - .rename_as - .as_ref() - .map(syn::LitStr::span) - .unwrap_or(self.ident.span()) + self.rename_as.as_ref().map(syn::LitStr::span).unwrap_or(self.ident.span()) } pub fn try_from( @@ -601,7 +578,7 @@ impl StorageDef { let item = if let syn::Item::Type(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")); + return Err(syn::Error::new(item.span(), "Invalid pallet::storage, expect item type.")) }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; @@ -610,23 +587,19 @@ impl StorageDef { .partition::, _>(|attr| matches!(attr, PalletStorageAttr::Getter(..))); if getters.len() > 1 { let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; - return Err(syn::Error::new(getters[1].attr_span(), msg)); + return Err(syn::Error::new(getters[1].attr_span(), msg)) } if names.len() > 1 { let msg = "Invalid pallet::storage, multiple argument pallet::storage_prefix found"; - return Err(syn::Error::new(names[1].attr_span(), msg)); + return Err(syn::Error::new(names[1].attr_span(), msg)) } - let getter = getters.pop().map(|attr| { - match attr { - PalletStorageAttr::Getter(ident, _) => ident, - _ => unreachable!(), - } + let getter = getters.pop().map(|attr| match attr { + PalletStorageAttr::Getter(ident, _) => ident, + _ => unreachable!(), }); - let rename_as = names.pop().map(|attr| { - match attr { - PalletStorageAttr::StorageName(lit, _) => lit, - _ => unreachable!(), - } + let rename_as = names.pop().map(|attr| match attr { + PalletStorageAttr::StorageName(lit, _) => lit, + _ => unreachable!(), }); let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); @@ -641,12 +614,12 @@ impl StorageDef { typ } else { let msg = "Invalid pallet::storage, expected type path"; - return Err(syn::Error::new(item.ty.span(), msg)); + return Err(syn::Error::new(item.ty.span(), msg)) }; if typ.path.segments.len() != 1 { let msg = "Invalid pallet::storage, expected type path with one segment"; - return Err(syn::Error::new(item.ty.span(), msg)); + return Err(syn::Error::new(item.ty.span(), msg)) } let (named_generics, metadata, query_kind) = process_generics(&typ.path.segments[0])?; @@ -654,11 +627,11 @@ impl StorageDef { let query_kind = query_kind .map(|query_kind| match query_kind { syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") - => Some(QueryKind::OptionQuery), + if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") => + Some(QueryKind::OptionQuery), syn::Type::Path(path) - if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") - => Some(QueryKind::ValueQuery), + if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") => + Some(QueryKind::ValueQuery), _ => None, }) .unwrap_or(Some(QueryKind::OptionQuery)); // This value must match the default generic. @@ -667,7 +640,7 @@ impl StorageDef { let msg = "Invalid pallet::storage, cannot generate getter because QueryKind is not \ identifiable. 
QueryKind must be `OptionQuery`, `ValueQuery`, or default one to be \ identifiable."; - return Err(syn::Error::new(getter.unwrap().span(), msg)); + return Err(syn::Error::new(getter.unwrap().span(), msg)) } Ok(StorageDef { diff --git a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs index 58e6105818e01560031d9f8501c70cb7c4244727..7b9d57472db4b83f1040c4648fee1b21115da993 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/type_value.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/type_value.rs @@ -50,28 +50,31 @@ impl TypeValueDef { item } else { let msg = "Invalid pallet::type_value, expected item fn"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; - if !item.attrs.is_empty() { let msg = "Invalid pallet::type_value, unexpected attribute"; - return Err(syn::Error::new(item.attrs[0].span(), msg)); + return Err(syn::Error::new(item.attrs[0].span(), msg)) } - if let Some(span) = item.sig.constness.as_ref().map(|t| t.span()) + if let Some(span) = item + .sig + .constness + .as_ref() + .map(|t| t.span()) .or_else(|| item.sig.asyncness.as_ref().map(|t| t.span())) .or_else(|| item.sig.unsafety.as_ref().map(|t| t.span())) .or_else(|| item.sig.abi.as_ref().map(|t| t.span())) .or_else(|| item.sig.variadic.as_ref().map(|t| t.span())) { let msg = "Invalid pallet::type_value, unexpected token"; - return Err(syn::Error::new(span, msg)); + return Err(syn::Error::new(span, msg)) } if !item.sig.inputs.is_empty() { let msg = "Invalid pallet::type_value, unexpected argument"; - return Err(syn::Error::new(item.sig.inputs[0].span(), msg)); + return Err(syn::Error::new(item.sig.inputs[0].span(), msg)) } let vis = item.vis.clone(); @@ -81,7 +84,7 @@ impl TypeValueDef { syn::ReturnType::Type(_, type_) => type_, syn::ReturnType::Default => { let msg = "Invalid pallet::type_value, expected return type"; - return Err(syn::Error::new(item.sig.span(), msg)); + return Err(syn::Error::new(item.sig.span(), msg)) }, }; diff --git a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs index 0a406413f394003dc5ec307a1672126238a7419b..87e2a326f186232ef9739427b4a701b7d5a467f3 100644 --- a/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs +++ b/substrate/frame/support/procedural/src/pallet/parse/validate_unsigned.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use syn::spanned::Spanned; use super::helper; +use syn::spanned::Spanned; /// The definition of the pallet validate unsigned implementation. 
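// Reviewer note (not part of the patch): `StorageDef::try_from` in
// `storage.rs` above infers the query kind by looking only at the last path
// segment of the `QueryKind` generic, which is why a type alias is "not
// identifiable" at macro-expansion time. A minimal sketch of that match,
// assuming `syn = "1"`:
fn query_kind_name(ty: &syn::Type) -> Option<&'static str> {
    match ty {
        syn::Type::Path(path)
            if path.path.segments.last().map_or(false, |s| s.ident == "OptionQuery") =>
            Some("OptionQuery"),
        syn::Type::Path(path)
            if path.path.segments.last().map_or(false, |s| s.ident == "ValueQuery") =>
            Some("ValueQuery"),
        _ => None,
    }
}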
pub struct ValidateUnsignedDef { @@ -32,24 +32,24 @@ impl ValidateUnsignedDef { item } else { let msg = "Invalid pallet::validate_unsigned, expected item impl"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) }; if item.trait_.is_none() { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } if let Some(last) = item.trait_.as_ref().unwrap().1.segments.last() { if last.ident != "ValidateUnsigned" { let msg = "Invalid pallet::validate_unsigned, expected trait ValidateUnsigned"; - return Err(syn::Error::new(last.span(), msg)); + return Err(syn::Error::new(last.span(), msg)) } } else { let msg = "Invalid pallet::validate_unsigned, expected impl<..> ValidateUnsigned for \ Pallet<..>"; - return Err(syn::Error::new(item.span(), msg)); + return Err(syn::Error::new(item.span(), msg)) } let mut instances = vec![]; diff --git a/substrate/frame/support/procedural/src/pallet_version.rs b/substrate/frame/support/procedural/src/pallet_version.rs index 0f3c478d4977aff6fb330b5ee41b1f123b939955..f0821f343c0354e69972dbc5e5ff83baef60778d 100644 --- a/substrate/frame/support/procedural/src/pallet_version.rs +++ b/substrate/frame/support/procedural/src/pallet_version.rs @@ -17,10 +17,10 @@ //! Implementation of macros related to pallet versioning. -use proc_macro2::{TokenStream, Span}; -use syn::{Result, Error}; -use std::{env, str::FromStr}; use frame_support_procedural_tools::generate_crate_access_2018; +use proc_macro2::{Span, TokenStream}; +use std::{env, str::FromStr}; +use syn::{Error, Result}; /// Get the version from the given version environment variable. /// diff --git a/substrate/frame/support/procedural/src/partial_eq_no_bound.rs b/substrate/frame/support/procedural/src/partial_eq_no_bound.rs index 1c37be8021c9ed93fb2413b6ec2662697187e14f..3dbabf3f5d39ab5b404c11213b30a04c297f103b 100644 --- a/substrate/frame/support/procedural/src/partial_eq_no_bound.rs +++ b/substrate/frame/support/procedural/src/partial_eq_no_bound.rs @@ -30,41 +30,47 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: let impl_ = match input.data { syn::Data::Struct(struct_) => match struct_.fields { syn::Fields::Named(named) => { - let fields = named.named.iter() + let fields = named + .named + .iter() .map(|i| &i.ident) .map(|i| quote::quote_spanned!(i.span() => self.#i == other.#i )); quote::quote!( true #( && #fields )* ) }, syn::Fields::Unnamed(unnamed) => { - let fields = unnamed.unnamed.iter().enumerate() + let fields = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, _)| syn::Index::from(i)) .map(|i| quote::quote_spanned!(i.span() => self.#i == other.#i )); quote::quote!( true #( && #fields )* ) }, syn::Fields::Unit => { - quote::quote!( true ) - } + quote::quote!(true) + }, }, syn::Data::Enum(enum_) => { - let variants = enum_.variants.iter() - .map(|variant| { + let variants = + enum_.variants.iter().map(|variant| { let ident = &variant.ident; match &variant.fields { syn::Fields::Named(named) => { let names = named.named.iter().map(|i| &i.ident); - let other_names = names.clone() - .enumerate() - .map(|(n, ident)| - syn::Ident::new(&format!("_{}", n), ident.span()) - ); + let other_names = names.clone().enumerate().map(|(n, ident)| { + syn::Ident::new(&format!("_{}", n), ident.span()) + }); let capture = names.clone(); - let other_capture = names.clone().zip(other_names.clone()) + let 
other_capture = names + .clone() + .zip(other_names.clone()) .map(|(i, other_i)| quote::quote!(#i: #other_i)); - let eq = names.zip(other_names) - .map(|(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i)); + let eq = names.zip(other_names).map( + |(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i), + ); quote::quote!( ( Self::#ident { #( #capture, )* }, @@ -73,12 +79,18 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: ) }, syn::Fields::Unnamed(unnamed) => { - let names = unnamed.unnamed.iter().enumerate() + let names = unnamed + .unnamed + .iter() + .enumerate() .map(|(i, f)| syn::Ident::new(&format!("_{}", i), f.span())); - let other_names = unnamed.unnamed.iter().enumerate() - .map(|(i, f)| syn::Ident::new(&format!("_{}_other", i), f.span())); - let eq = names.clone().zip(other_names.clone()) - .map(|(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i)); + let other_names = + unnamed.unnamed.iter().enumerate().map(|(i, f)| { + syn::Ident::new(&format!("_{}_other", i), f.span()) + }); + let eq = names.clone().zip(other_names.clone()).map( + |(i, other_i)| quote::quote_spanned!(i.span() => #i == #other_i), + ); quote::quote!( ( Self::#ident ( #( #names, )* ), @@ -122,5 +134,6 @@ pub fn derive_partial_eq_no_bound(input: proc_macro::TokenStream) -> proc_macro: } } }; - ).into() + ) + .into() } diff --git a/substrate/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/substrate/frame/support/procedural/src/storage/genesis_config/builder_def.rs index 5b73928951cfa520b06dfcf496e6f4dfc62a0052..9669212f198fca293aa09225f1a79968320036c9 100644 --- a/substrate/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ b/substrate/frame/support/procedural/src/storage/genesis_config/builder_def.rs @@ -17,11 +17,11 @@ //! Builder logic definition used to build genesis storage. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::spanned::Spanned; use quote::{quote, quote_spanned}; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::spanned::Spanned; /// Definition of builder blocks, each block insert some value in the storage. /// They must be called inside externalities, and with `self` being the genesis config. @@ -79,7 +79,7 @@ impl BuilderDef { if let Some(data) = data { blocks.push(match &line.storage_type { StorageLineTypeDef::Simple(_) if line.is_option => { - quote!{{ + quote! {{ #data let v: Option<&#value_type>= data; if let Some(v) = v { @@ -88,7 +88,7 @@ impl BuilderDef { }} }, StorageLineTypeDef::Simple(_) if !line.is_option => { - quote!{{ + quote! {{ #data let v: &#value_type = data; <#storage_struct as #scrate::#storage_trait>::put::<&#value_type>(v); @@ -97,7 +97,7 @@ impl BuilderDef { StorageLineTypeDef::Simple(_) => unreachable!(), StorageLineTypeDef::Map(map) => { let key = &map.key; - quote!{{ + quote! {{ #data let data: &#scrate::sp_std::vec::Vec<(#key, #value_type)> = data; data.iter().for_each(|(k, v)| { @@ -110,7 +110,7 @@ impl BuilderDef { StorageLineTypeDef::DoubleMap(map) => { let key1 = &map.key1; let key2 = &map.key2; - quote!{{ + quote! 
{{ #data let data: &#scrate::sp_std::vec::Vec<(#key1, #key2, #value_type)> = data; data.iter().for_each(|(k1, k2, v)| { @@ -122,12 +122,8 @@ impl BuilderDef { }, StorageLineTypeDef::NMap(map) => { let key_tuple = map.to_key_tuple(); - let key_arg = if map.keys.len() == 1 { - quote!((k,)) - } else { - quote!(k) - }; - quote!{{ + let key_arg = if map.keys.len() == 1 { quote!((k,)) } else { quote!(k) }; + quote! {{ #data let data: &#scrate::sp_std::vec::Vec<(#key_tuple, #value_type)> = data; data.iter().for_each(|(k, v)| { @@ -148,10 +144,6 @@ impl BuilderDef { }); } - - Self { - blocks, - is_generic, - } + Self { blocks, is_generic } } } diff --git a/substrate/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/substrate/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index c54349136cf0500e3bebd1d5978d806cf482de35..fbdaab06b48955229a780863e11cac33d9c2c8c4 100644 --- a/substrate/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/substrate/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -17,11 +17,11 @@ //! Genesis config definition. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::{spanned::Spanned, parse_quote}; use quote::quote; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::{parse_quote, spanned::Spanned}; pub struct GenesisConfigFieldDef { pub name: syn::Ident, @@ -47,30 +47,28 @@ impl GenesisConfigDef { pub fn from_def(def: &DeclStorageDefExt) -> syn::Result { let fields = Self::get_genesis_config_field_defs(def)?; - let is_generic = fields.iter() + let is_generic = fields + .iter() .any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic)); - let ( - genesis_struct_decl, - genesis_impl, - genesis_struct, - genesis_where_clause - ) = if is_generic { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let where_clause = &def.where_clause; - ( - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), - quote!(<#runtime_generic, #optional_instance>), - where_clause.clone(), - ) - } else { - (quote!(), quote!(), quote!(), None) - }; + let (genesis_struct_decl, genesis_impl, genesis_struct, genesis_where_clause) = + if is_generic { + let runtime_generic = &def.module_runtime_generic; + let runtime_trait = &def.module_runtime_trait; + let optional_instance = &def.optional_instance; + let optional_instance_bound = &def.optional_instance_bound; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; + let where_clause = &def.where_clause; + ( + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), + quote!(<#runtime_generic, #optional_instance>), + where_clause.clone(), + ) + } else { + (quote!(), quote!(), quote!(), None) + }; Ok(Self { is_generic, @@ -82,14 +80,14 @@ impl GenesisConfigDef { }) } - fn get_genesis_config_field_defs(def: &DeclStorageDefExt) - -> syn::Result> - { + fn get_genesis_config_field_defs( + def: &DeclStorageDefExt, + ) 
-> syn::Result> { let mut config_field_defs = Vec::new(); - for (config_field, line) in def.storage_lines.iter() - .filter_map(|line| line.config.as_ref().map(|config_field| (config_field.clone(), line))) - { + for (config_field, line) in def.storage_lines.iter().filter_map(|line| { + line.config.as_ref().map(|config_field| (config_field.clone(), line)) + }) { let value_type = &line.value_type; let typ = match &line.storage_type { @@ -107,18 +105,20 @@ impl GenesisConfigDef { StorageLineTypeDef::NMap(map) => { let key_tuple = map.to_key_tuple(); parse_quote!( Vec<(#key_tuple, #value_type)> ) - } + }, }; - let default = line.default_value.as_ref() - .map(|d| { - if line.is_option { - quote!( #d.unwrap_or_default() ) - } else { - quote!( #d ) - } - }) - .unwrap_or_else(|| quote!( Default::default() )); + let default = + line.default_value + .as_ref() + .map(|d| { + if line.is_option { + quote!( #d.unwrap_or_default() ) + } else { + quote!( #d ) + } + }) + .unwrap_or_else(|| quote!(Default::default())); config_field_defs.push(GenesisConfigFieldDef { name: config_field, @@ -129,22 +129,26 @@ impl GenesisConfigDef { } for line in &def.extra_genesis_config_lines { - let attrs = line.attrs.iter() + let attrs = line + .attrs + .iter() .map(|attr| { let meta = attr.parse_meta()?; if meta.path().is_ident("cfg") { return Err(syn::Error::new( meta.span(), - "extra genesis config items do not support `cfg` attribute" - )); + "extra genesis config items do not support `cfg` attribute", + )) } Ok(meta) }) .collect::>()?; - let default = line.default.as_ref().map(|e| quote!( #e )) - .unwrap_or_else(|| quote!( Default::default() )); - + let default = line + .default + .as_ref() + .map(|e| quote!( #e )) + .unwrap_or_else(|| quote!(Default::default())); config_field_defs.push(GenesisConfigFieldDef { name: line.name.clone(), diff --git a/substrate/frame/support/procedural/src/storage/genesis_config/mod.rs b/substrate/frame/support/procedural/src/storage/genesis_config/mod.rs index abc7af729f064a0ca0865fe3ea96a61aa959d2b2..d2d1afb0177360251c869d4e54697a4fd5c2c1d5 100644 --- a/substrate/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/substrate/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -18,14 +18,14 @@ //! Declaration of genesis config structure and implementation of build storage trait and //! functions. 
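// Reviewer note (not part of the patch): both `GenesisConfigDef::from_def`
// above and `impl_build_storage` below pick between two prebuilt sets of
// token streams depending on whether the item is generic; an empty `quote!()`
// stream simply erases the generics from the emitted code. A minimal sketch,
// assuming `quote = "1"` and `proc-macro2 = "1"`:
fn genesis_generics(is_generic: bool) -> (proc_macro2::TokenStream, proc_macro2::TokenStream) {
    use quote::quote;
    if is_generic {
        // e.g. expands to `impl<T: Config> GenesisConfig<T> { .. }`
        (quote!(<T: Config>), quote!(<T>))
    } else {
        (quote!(), quote!())
    }
}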
-use proc_macro2::{TokenStream, Span}; -use quote::quote; use super::DeclStorageDefExt; -pub use genesis_config_def::GenesisConfigDef; pub use builder_def::BuilderDef; +pub use genesis_config_def::GenesisConfigDef; +use proc_macro2::{Span, TokenStream}; +use quote::quote; -mod genesis_config_def; mod builder_def; +mod genesis_config_def; const DEFAULT_INSTANCE_NAME: &str = "__GeneratedInstance"; @@ -118,19 +118,16 @@ fn impl_build_storage( let genesis_impl = &genesis_config.genesis_impl; let genesis_where_clause = &genesis_config.genesis_where_clause; - let ( - fn_generic, - fn_traitinstance, - fn_where_clause - ) = if !genesis_config.is_generic && builders.is_generic { - ( - quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), - quote!( #runtime_generic, #optional_instance ), - Some(&def.where_clause), - ) - } else { - (quote!(), quote!(), None) - }; + let (fn_generic, fn_traitinstance, fn_where_clause) = + if !genesis_config.is_generic && builders.is_generic { + ( + quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), + quote!( #runtime_generic, #optional_instance ), + Some(&def.where_clause), + ) + } else { + (quote!(), quote!(), None) + }; let builder_blocks = &builders.blocks; @@ -138,7 +135,7 @@ fn impl_build_storage( #scrate::sp_runtime::BuildModuleGenesisStorage<#runtime_generic, #inherent_instance> ); - quote!{ + quote! { #[cfg(feature = "std")] impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { /// Build the storage for this module. @@ -189,7 +186,7 @@ pub fn genesis_config_and_build_storage(def: &DeclStorageDefExt) -> TokenStream decl_genesis_config_and_impl_default(scrate, &genesis_config); let impl_build_storage = impl_build_storage(scrate, def, &genesis_config, &builders); - quote!{ + quote! { #decl_genesis_config_and_impl_default #impl_build_storage } diff --git a/substrate/frame/support/procedural/src/storage/getters.rs b/substrate/frame/support/procedural/src/storage/getters.rs index 32155239acdc6619bda4982f8be996d100d617e5..988e6fa096243795d03cc6381d3a6e953010bc12 100644 --- a/substrate/frame/support/procedural/src/storage/getters.rs +++ b/substrate/frame/support/procedural/src/storage/getters.rs @@ -17,15 +17,17 @@ //! Implementation of getters on module structure. +use super::{DeclStorageDefExt, StorageLineTypeDef}; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineTypeDef}; pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { let scrate = &def.hidden_crate; let mut getters = TokenStream::new(); - for (get_fn, line) in def.storage_lines.iter() + for (get_fn, line) in def + .storage_lines + .iter() .filter_map(|line| line.getter.as_ref().map(|get_fn| (get_fn, line))) { let attrs = &line.doc_attrs; @@ -35,7 +37,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { let getter = match &line.storage_type { StorageLineTypeDef::Simple(value) => { - quote!{ + quote! { #( #[ #attrs ] )* pub fn #get_fn() -> #value { <#storage_struct as #scrate::#storage_trait>::get() @@ -45,7 +47,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { StorageLineTypeDef::Map(map) => { let key = &map.key; let value = &map.value; - quote!{ + quote! { #( #[ #attrs ] )* pub fn #get_fn>(key: K) -> #value { <#storage_struct as #scrate::#storage_trait>::get(key) @@ -56,7 +58,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { let key1 = &map.key1; let key2 = &map.key2; let value = &map.value; - quote!{ + quote! 
{ pub fn #get_fn<KArg1, KArg2>(k1: KArg1, k2: KArg2) -> #value where KArg1: #scrate::codec::EncodeLike<#key1>, @@ -69,7 +71,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { StorageLineTypeDef::NMap(map) => { let keygen = map.to_keygen_struct(&def.hidden_crate); let value = &map.value; - quote!{ + quote! { pub fn #get_fn<KArg>(key: KArg) -> #value where KArg: #scrate::storage::types::EncodeLikeTuple< @@ -80,7 +82,7 @@ pub fn impl_getters(def: &DeclStorageDefExt) -> TokenStream { <#storage_struct as #scrate::#storage_trait>::get(key) } } - } + }, }; getters.extend(getter); } diff --git a/substrate/frame/support/procedural/src/storage/instance_trait.rs b/substrate/frame/support/procedural/src/storage/instance_trait.rs index 55f6ef478054ce6e79507b1d33c44df9042ac91e..4f55d38596666f2ae11ef16475a5023a9238ffd2 100644 --- a/substrate/frame/support/procedural/src/storage/instance_trait.rs +++ b/substrate/frame/support/procedural/src/storage/instance_trait.rs @@ -18,10 +18,10 @@ //! Implementation of the trait instance and the instance structures implementing it. //! (For non-instantiable traits there is still the inherent instance implemented). -use proc_macro2::{TokenStream, Span}; -use quote::quote; use super::DeclStorageDefExt; use crate::NUMBER_OF_INSTANCE; +use proc_macro2::{Span, TokenStream}; +use quote::quote; pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; @@ -52,14 +52,12 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { index: i, } }) - .chain( - module_instance.instance_default.as_ref().map(|ident| InstanceDef { - prefix: String::new(), - instance_struct: ident.clone(), - doc: quote!(#[doc=r"Default module instance"]), - index: 0, - }) - ); + .chain(module_instance.instance_default.as_ref().map(|ident| InstanceDef { + prefix: String::new(), + instance_struct: ident.clone(), + doc: quote!(#[doc=r"Default module instance"]), + index: 0, + })); for instance_def in instance_defs { impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); @@ -70,8 +68,8 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { let inherent_instance = syn::Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()); // Implementation of inherent instance. - if let Some(default_instance) = def.module_instance.as_ref() - .and_then(|i| i.instance_default.as_ref()) + if let Some(default_instance) = + def.module_instance.as_ref().and_then(|i| i.instance_default.as_ref()) { impls.extend(quote! { /// Hidden instance generated to be internally used when module is used without @@ -97,10 +95,7 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { impls } -fn reexport_instance_trait( - scrate: &TokenStream, - def: &DeclStorageDefExt, -) -> TokenStream { +fn reexport_instance_trait(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { if let Some(i) = def.module_instance.as_ref() { let instance_trait = &i.instance_trait; quote!( diff --git a/substrate/frame/support/procedural/src/storage/metadata.rs b/substrate/frame/support/procedural/src/storage/metadata.rs index 8a42dd4308d12dcb56fe744005e39049fdb61df9..ca7dd97c155f399d66e062b8c132cae6b007b98f 100644 --- a/substrate/frame/support/procedural/src/storage/metadata.rs +++ b/substrate/frame/support/procedural/src/storage/metadata.rs @@ -17,17 +17,17 @@ //! Implementation of `storage_metadata` on module structure, used by construct_runtime.
+use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::clean_type_string; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> TokenStream { let value_type = &line.value_type; let value_type = clean_type_string(&quote!( #value_type ).to_string()); match &line.storage_type { StorageLineTypeDef::Simple(_) => { - quote!{ + quote! { #scrate::metadata::StorageEntryType::Plain( #scrate::metadata::DecodeDifferent::Encode(#value_type), ) @@ -37,7 +37,7 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> let hasher = map.hasher.into_metadata(); let key = &map.key; let key = clean_type_string(&quote!(#key).to_string()); - quote!{ + quote! { #scrate::metadata::StorageEntryType::Map { hasher: #scrate::metadata::#hasher, key: #scrate::metadata::DecodeDifferent::Encode(#key), @@ -53,7 +53,7 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> let key1 = clean_type_string(&quote!(#key1).to_string()); let key2 = &map.key2; let key2 = clean_type_string(&quote!(#key2).to_string()); - quote!{ + quote! { #scrate::metadata::StorageEntryType::DoubleMap { hasher: #scrate::metadata::#hasher1, key1: #scrate::metadata::DecodeDifferent::Encode(#key1), @@ -64,15 +64,17 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> } }, StorageLineTypeDef::NMap(map) => { - let keys = map.keys + let keys = map + .keys .iter() .map(|key| clean_type_string(&quote!(#key).to_string())) .collect::<Vec<_>>(); - let hashers = map.hashers + let hashers = map + .hashers .iter() .map(|hasher| hasher.to_storage_hasher_struct()) .collect::<Vec<_>>(); - quote!{ + quote!
{ #scrate::metadata::StorageEntryType::NMap { keys: #scrate::metadata::DecodeDifferent::Encode(&[ #( #keys, )* @@ -83,7 +85,7 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> value: #scrate::metadata::DecodeDifferent::Encode(#value_type), } } - } + }, } } @@ -92,12 +94,17 @@ fn default_byte_getter( line: &StorageLineDefExt, def: &DeclStorageDefExt, ) -> (TokenStream, TokenStream) { - let default = line.default_value.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); + let default = line + .default_value + .as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); let str_name = line.name.to_string(); - let struct_name = syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); - let cache_name = syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); + let struct_name = + syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); + let cache_name = + syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); let runtime_generic = &def.module_runtime_generic; let runtime_trait = &def.module_runtime_trait; @@ -177,10 +184,8 @@ pub fn impl_metadata(def: &DeclStorageDefExt) -> TokenStream { let ty = storage_line_metadata_type(scrate, line); - let ( - default_byte_getter_struct_def, - default_byte_getter_struct_instance, - ) = default_byte_getter(scrate, line, def); + let (default_byte_getter_struct_def, default_byte_getter_struct_instance) = + default_byte_getter(scrate, line, def); let mut docs = TokenStream::new(); for attr in line.attrs.iter().filter_map(|v| v.parse_meta().ok()) { diff --git a/substrate/frame/support/procedural/src/storage/mod.rs b/substrate/frame/support/procedural/src/storage/mod.rs index 570ef447a43cb0de0128a5eca3cc0852e6d03981..27964d7012a2807c8ba1457149e5467d26759272 100644 --- a/substrate/frame/support/procedural/src/storage/mod.rs +++ b/substrate/frame/support/procedural/src/storage/mod.rs @@ -17,22 +17,22 @@ //! `decl_storage` input definition and expansion. 
-mod storage_struct; -mod storage_info; -mod parse; -mod store_trait; +mod genesis_config; mod getters; -mod metadata; mod instance_trait; -mod genesis_config; +mod metadata; +mod parse; mod print_pallet_upgrade; +mod storage_info; +mod storage_struct; +mod store_trait; pub(crate) use instance_trait::INHERENT_INSTANCE_NAME; -use quote::quote; use frame_support_procedural_tools::{ - generate_crate_access, generate_hidden_includes, syn_ext as ext + generate_crate_access, generate_hidden_includes, syn_ext as ext, }; +use quote::quote; /// All information contained in input of decl_storage pub struct DeclStorageDef { @@ -115,34 +115,37 @@ pub struct DeclStorageDefExt impl From<DeclStorageDef> for DeclStorageDefExt { fn from(mut def: DeclStorageDef) -> Self { - let hidden_crate_name = def.hidden_crate.as_ref().map(|i| i.to_string()) + let hidden_crate_name = def + .hidden_crate + .as_ref() + .map(|i| i.to_string()) .unwrap_or_else(|| "decl_storage".to_string()); let hidden_crate = generate_crate_access(&hidden_crate_name, "frame-support"); let hidden_imports = generate_hidden_includes(&hidden_crate_name, "frame-support"); let storage_lines = def.storage_lines.drain(..).collect::<Vec<_>>(); - let storage_lines = storage_lines.into_iter() + let storage_lines = storage_lines + .into_iter() .map(|line| StorageLineDefExt::from_def(line, &def, &hidden_crate)) .collect(); - let ( - optional_instance, - optional_instance_bound, - optional_instance_bound_optional_default, - ) = if let Some(instance) = def.module_instance.as_ref() { - let instance_generic = &instance.instance_generic; - let instance_trait= &instance.instance_trait; - let optional_equal_instance_default = instance.instance_default.as_ref() - .map(|d| quote!( = #d )); - ( - Some(quote!(#instance_generic)), - Some(quote!(#instance_generic: #instance_trait)), - Some(quote!(#instance_generic: #instance_trait #optional_equal_instance_default)), - ) - } else { - (None, None, None) - }; + let (optional_instance, optional_instance_bound, optional_instance_bound_optional_default) = + if let Some(instance) = def.module_instance.as_ref() { + let instance_generic = &instance.instance_generic; + let instance_trait = &instance.instance_trait; + let optional_equal_instance_default = + instance.instance_default.as_ref().map(|d| quote!( = #d )); + ( + Some(quote!(#instance_generic)), + Some(quote!(#instance_generic: #instance_trait)), + Some( + quote!(#instance_generic: #instance_trait #optional_equal_instance_default), + ), + ) + } else { + (None, None, None) + }; let module_runtime_generic = &def.module_runtime_generic; let module_runtime_trait = &def.module_runtime_trait; @@ -255,22 +258,20 @@ impl StorageLineDefExt { hidden_crate: &proc_macro2::TokenStream, ) -> Self { let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => { - ext::type_contains_ident(&value, &def.module_runtime_generic) - }, - StorageLineTypeDef::Map(map) => { - ext::type_contains_ident(&map.key, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } - StorageLineTypeDef::DoubleMap(map) => { - ext::type_contains_ident(&map.key1, &def.module_runtime_generic) - || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } - StorageLineTypeDef::NMap(map) => { - map.keys.iter().any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } + 
StorageLineTypeDef::Simple(value) => + ext::type_contains_ident(&value, &def.module_runtime_generic), + StorageLineTypeDef::Map(map) => + ext::type_contains_ident(&map.key, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::DoubleMap(map) => + ext::type_contains_ident(&map.key1, &def.module_runtime_generic) || + ext::type_contains_ident(&map.key2, &def.module_runtime_generic) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), + StorageLineTypeDef::NMap(map) => + map.keys + .iter() + .any(|key| ext::type_contains_ident(key, &def.module_runtime_generic)) || + ext::type_contains_ident(&map.value, &def.module_runtime_generic), }; let query_type = match &storage_def.storage_type { @@ -280,15 +281,13 @@ impl StorageLineDefExt { StorageLineTypeDef::NMap(map) => map.value.clone(), }; let is_option = ext::extract_type_option(&query_type).is_some(); - let value_type = ext::extract_type_option(&query_type).unwrap_or_else(|| query_type.clone()); + let value_type = + ext::extract_type_option(&query_type).unwrap_or_else(|| query_type.clone()); let module_runtime_generic = &def.module_runtime_generic; let module_runtime_trait = &def.module_runtime_trait; - let optional_storage_runtime_comma = if is_generic { - Some(quote!( #module_runtime_generic, )) - } else { - None - }; + let optional_storage_runtime_comma = + if is_generic { Some(quote!( #module_runtime_generic, )) } else { None }; let optional_storage_runtime_bound_comma = if is_generic { Some(quote!( #module_runtime_generic: #module_runtime_trait, )) } else { @@ -304,11 +303,8 @@ impl StorageLineDefExt { #storage_name<#optional_storage_runtime_comma #optional_instance_generic> ); - let optional_storage_where_clause = if is_generic { - def.where_clause.as_ref().map(|w| quote!( #w )) - } else { - None - }; + let optional_storage_where_clause = + if is_generic { def.where_clause.as_ref().map(|w| quote!( #w )) } else { None }; let storage_trait_truncated = match &storage_def.storage_type { StorageLineTypeDef::Simple(_) => { @@ -326,13 +322,15 @@ impl StorageLineDefExt { StorageLineTypeDef::NMap(map) => { let keygen = map.to_keygen_struct(hidden_crate); quote!( StorageNMap<#keygen, #value_type> ) - } + }, }; let storage_trait = quote!( storage::#storage_trait_truncated ); let storage_generator_trait = quote!( storage::generator::#storage_trait_truncated ); - let doc_attrs = storage_def.attrs.iter() + let doc_attrs = storage_def + .attrs + .iter() .filter_map(|a| a.parse_meta().ok()) .filter(|m| m.path().is_ident("doc")) .collect(); @@ -396,27 +394,28 @@ impl NMapDef { if self.keys.len() == 1 { let hasher = &self.hashers[0].to_storage_hasher_struct(); let key = &self.keys[0]; - return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ); + return quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) } - let key_hasher = self.keys.iter().zip(&self.hashers).map(|(key, hasher)| { - let hasher = hasher.to_storage_hasher_struct(); - quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) - }) - .collect::<Vec<_>>(); + let key_hasher = self + .keys + .iter() + .zip(&self.hashers) + .map(|(key, hasher)| { + let hasher = hasher.to_storage_hasher_struct(); + quote!( #scrate::storage::types::Key<#scrate::#hasher, #key> ) + }) + .collect::<Vec<_>>(); quote!(( #(#key_hasher,)* )) } fn to_key_tuple(&self) -> proc_macro2::TokenStream { if self.keys.len() == 1 { let key = &self.keys[0]; - return quote!(#key); + return quote!(#key) } - let tuple = 
self.keys.iter().map(|key| { - quote!(#key) - }) - .collect::<Vec<_>>(); + let tuple = self.keys.iter().map(|key| quote!(#key)).collect::<Vec<_>>(); quote!(( #(#tuple,)* )) } } @@ -442,25 +441,25 @@ pub enum HasherKind { impl HasherKind { fn to_storage_hasher_struct(&self) -> proc_macro2::TokenStream { match self { - HasherKind::Blake2_256 => quote!( Blake2_256 ), - HasherKind::Blake2_128 => quote!( Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( Blake2_128Concat ), - HasherKind::Twox256 => quote!( Twox256 ), - HasherKind::Twox128 => quote!( Twox128 ), - HasherKind::Twox64Concat => quote!( Twox64Concat ), - HasherKind::Identity => quote!( Identity ), + HasherKind::Blake2_256 => quote!(Blake2_256), + HasherKind::Blake2_128 => quote!(Blake2_128), + HasherKind::Blake2_128Concat => quote!(Blake2_128Concat), + HasherKind::Twox256 => quote!(Twox256), + HasherKind::Twox128 => quote!(Twox128), + HasherKind::Twox64Concat => quote!(Twox64Concat), + HasherKind::Identity => quote!(Identity), } } fn into_metadata(&self) -> proc_macro2::TokenStream { match self { - HasherKind::Blake2_256 => quote!( StorageHasher::Blake2_256 ), - HasherKind::Blake2_128 => quote!( StorageHasher::Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( StorageHasher::Blake2_128Concat ), - HasherKind::Twox256 => quote!( StorageHasher::Twox256 ), - HasherKind::Twox128 => quote!( StorageHasher::Twox128 ), - HasherKind::Twox64Concat => quote!( StorageHasher::Twox64Concat ), - HasherKind::Identity => quote!( StorageHasher::Identity ), + HasherKind::Blake2_256 => quote!(StorageHasher::Blake2_256), + HasherKind::Blake2_128 => quote!(StorageHasher::Blake2_128), + HasherKind::Blake2_128Concat => quote!(StorageHasher::Blake2_128Concat), + HasherKind::Twox256 => quote!(StorageHasher::Twox256), + HasherKind::Twox128 => quote!(StorageHasher::Twox128), + HasherKind::Twox64Concat => quote!(StorageHasher::Twox64Concat), + HasherKind::Identity => quote!(StorageHasher::Identity), } } } @@ -502,5 +501,6 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr #genesis_config #storage_struct #storage_info - ).into() + ) + .into() } diff --git a/substrate/frame/support/procedural/src/storage/parse.rs b/substrate/frame/support/procedural/src/storage/parse.rs index ca97b7957c108bdfa5b9ea7e51d4ffd693b7e4ef..d3b73843da1790dde9232846174c5f169d5344d4 100644 --- a/substrate/frame/support/procedural/src/storage/parse.rs +++ b/substrate/frame/support/procedural/src/storage/parse.rs @@ -17,8 +17,8 @@ //! Parsing of decl_storage input. -use frame_support_procedural_tools::{ToTokens, Parse, syn_ext as ext}; -use syn::{Ident, Token, spanned::Spanned}; +use frame_support_procedural_tools::{syn_ext as ext, Parse, ToTokens}; +use syn::{spanned::Spanned, Ident, Token}; mod keyword { syn::custom_keyword!(generate_storage_info); @@ -367,48 +367,35 @@ fn get_module_instance( it is now defined at frame_support::traits::Instance.
Expect `Instance` found `{}`", instantiable.as_ref().unwrap(), ); - return Err(syn::Error::new(instantiable.span(), msg)); + return Err(syn::Error::new(instantiable.span(), msg)) } match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => { + (Some(instance), Some(instantiable), default_instance) => Ok(Some(super::ModuleInstanceDef { instance_generic: instance, instance_trait: instantiable, instance_default: default_instance, - })) - }, + })), (None, None, None) => Ok(None), - (Some(instance), None, _) => Err( - syn::Error::new( - instance.span(), - format!( - "Expect instantiable trait bound for instance: {}. {}", - instance, - right_syntax, - ) - ) - ), - (None, Some(instantiable), _) => Err( - syn::Error::new( - instantiable.span(), - format!( - "Expect instance generic for bound instantiable: {}. {}", - instantiable, - right_syntax, - ) - ) - ), - (None, _, Some(default_instance)) => Err( - syn::Error::new( - default_instance.span(), - format!( - "Expect instance generic for default instance: {}. {}", - default_instance, - right_syntax, - ) - ) - ), + (Some(instance), None, _) => Err(syn::Error::new( + instance.span(), + format!("Expect instantiable trait bound for instance: {}. {}", instance, right_syntax,), + )), + (None, Some(instantiable), _) => Err(syn::Error::new( + instantiable.span(), + format!( + "Expect instance generic for bound instantiable: {}. {}", + instantiable, right_syntax, + ), + )), + (None, _, Some(default_instance)) => Err(syn::Error::new( + default_instance.span(), + format!( + "Expect instance generic for default instance: {}. {}", + default_instance, right_syntax, + ), + )), } } @@ -417,37 +404,37 @@ pub fn parse(input: syn::parse::ParseStream) -> syn::Result { - extra_genesis_config_lines.push(super::ExtraGenesisLineDef{ + extra_genesis_config_lines.push(super::ExtraGenesisLineDef { attrs: def.attrs.inner, name: def.extra_field.content, typ: def.extra_type, default: def.default_value.inner.map(|o| o.expr), }); - } + }, AddExtraGenesisLineEnum::AddExtraGenesisBuild(def) => { if extra_genesis_build.is_some() { return Err(syn::Error::new( def.span(), - "Only one build expression allowed for extra genesis" + "Only one build expression allowed for extra genesis", )) } extra_genesis_build = Some(def.expr.content); - } + }, } } @@ -496,68 +483,65 @@ fn parse_storage_line_defs( }; if let Some(ref config) = config { - storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each(|other_config| { - if other_config == config { - Err(syn::Error::new( - config.span(), - "`config()`/`get()` with the same name already defined.", - )) - } else { - Ok(()) - } - })?; + storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each( + |other_config| { + if other_config == config { + Err(syn::Error::new( + config.span(), + "`config()`/`get()` with the same name already defined.", + )) + } else { + Ok(()) + } + }, + )?; } let max_values = match &line.storage_type { - DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => { - line.max_values.inner.map(|i| i.expr.content) - }, - DeclStorageType::Simple(_) => { + DeclStorageType::Map(_) | DeclStorageType::DoubleMap(_) | DeclStorageType::NMap(_) => + line.max_values.inner.map(|i| i.expr.content), + DeclStorageType::Simple(_) => if let Some(max_values) = line.max_values.inner { let msg = "unexpected max_values attribute for storage value."; let span = max_values.max_values_keyword.span(); - return Err(syn::Error::new(span, msg)); + return 
Err(syn::Error::new(span, msg)) } else { Some(syn::parse_quote!(1u32)) - } - }, + }, }; let span = line.storage_type.span(); - let no_hasher_error = || syn::Error::new( - span, - "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead." - ); + let no_hasher_error = || { + syn::Error::new( + span, + "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead.", + ) + }; let storage_type = match line.storage_type { - DeclStorageType::Map(map) => super::StorageLineTypeDef::Map( - super::MapDef { - hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), - key: map.key, - value: map.value, - } - ), - DeclStorageType::DoubleMap(map) => super::StorageLineTypeDef::DoubleMap( - Box::new(super::DoubleMapDef { + DeclStorageType::Map(map) => super::StorageLineTypeDef::Map(super::MapDef { + hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), + key: map.key, + value: map.value, + }), + DeclStorageType::DoubleMap(map) => + super::StorageLineTypeDef::DoubleMap(Box::new(super::DoubleMapDef { hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), key1: map.key1, key2: map.key2, value: map.value, - }) - ), - DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap( - super::NMapDef { - hashers: map - .storage_keys - .inner - .iter() - .map(|pair| Ok(pair.hasher.inner.clone().ok_or_else(no_hasher_error)?.into())) - .collect::<Result<Vec<_>, syn::Error>>()?, - keys: map.storage_keys.inner.iter().map(|pair| pair.key.clone()).collect(), - value: map.value, - } - ), + })), + DeclStorageType::NMap(map) => super::StorageLineTypeDef::NMap(super::NMapDef { + hashers: map + .storage_keys + .inner + .iter() + .map(|pair| Ok(pair.hasher.inner.clone().ok_or_else(no_hasher_error)?.into())) + .collect::<Result<Vec<_>, syn::Error>>()?, + keys: map.storage_keys.inner.iter().map(|pair| pair.key.clone()).collect(), + value: map.value, + }), DeclStorageType::Simple(expr) => super::StorageLineTypeDef::Simple(expr), }; diff --git a/substrate/frame/support/procedural/src/storage/print_pallet_upgrade.rs b/substrate/frame/support/procedural/src/storage/print_pallet_upgrade.rs index a6f64a588b6333efcbe3b4a5edfb493a0e925999..03f09a7edb48e3a364a8fd76ba84742bd57576d2 100644 --- a/substrate/frame/support/procedural/src/storage/print_pallet_upgrade.rs +++ b/substrate/frame/support/procedural/src/storage/print_pallet_upgrade.rs @@ -1,6 +1,6 @@ use super::StorageLineTypeDef; -use quote::ToTokens; use frame_support_procedural_tools::clean_type_string; +use quote::ToTokens; /// Environment variable that tells us to print pallet upgrade helper. const PRINT_PALLET_UPGRADE: &str = "PRINT_PALLET_UPGRADE"; @@ -10,7 +10,7 @@ fn check_print_pallet_upgrade() -> bool { } /// Convert visibility, as objects are now defined in a module.
-fn convert_vis(vis: &syn::Visibility) -> &'static str{ +fn convert_vis(vis: &syn::Visibility) -> &'static str { match vis { syn::Visibility::Inherited => "pub(super)", syn::Visibility::Public(_) => "pub", @@ -31,23 +31,13 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { let scrate = &quote::quote!(frame_support); - let config_gen = if def.optional_instance.is_some() { - "" - } else { - Default::default() - }; + let config_gen = + if def.optional_instance.is_some() { "" } else { Default::default() }; - let impl_gen = if def.optional_instance.is_some() { - ", I: 'static>" - } else { - "" - }; + let impl_gen = + if def.optional_instance.is_some() { ", I: 'static>" } else { "" }; - let decl_gen = if def.optional_instance.is_some() { - "" - } else { - "" - }; + let decl_gen = if def.optional_instance.is_some() { "" } else { "" }; let full_decl_gen = if def.optional_instance.is_some() { ", I: 'static = ()>" @@ -55,17 +45,9 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { "" }; - let use_gen = if def.optional_instance.is_some() { - "" - } else { - "" - }; + let use_gen = if def.optional_instance.is_some() { "" } else { "" }; - let use_gen_tuple = if def.optional_instance.is_some() { - "<(T, I)>" - } else { - "" - }; + let use_gen_tuple = if def.optional_instance.is_some() { "<(T, I)>" } else { "" }; let mut genesis_config = String::new(); let mut genesis_build = String::new(); @@ -80,17 +62,11 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { }, }; - let genesis_config_impl_gen = if genesis_config_def.is_generic { - impl_gen - } else { - Default::default() - }; + let genesis_config_impl_gen = + if genesis_config_def.is_generic { impl_gen } else { Default::default() }; - let genesis_config_use_gen = if genesis_config_def.is_generic { - use_gen - } else { - Default::default() - }; + let genesis_config_use_gen = + if genesis_config_def.is_generic { use_gen } else { Default::default() }; let genesis_config_decl_gen = if genesis_config_def.is_generic { if def.optional_instance.is_some() { @@ -105,26 +81,31 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { let mut genesis_config_decl_fields = String::new(); let mut genesis_config_default_fields = String::new(); for field in &genesis_config_def.fields { - genesis_config_decl_fields.push_str(&format!(" + genesis_config_decl_fields.push_str(&format!( + " {attrs}pub {name}: {typ},", - attrs = field.attrs.iter() - .fold(String::new(), |res, attr| { - format!("{}#[{}] + attrs = field.attrs.iter().fold(String::new(), |res, attr| { + format!( + "{}#[{}] ", - res, attr.to_token_stream()) - }), + res, + attr.to_token_stream() + ) + }), name = field.name, typ = to_cleaned_string(&field.typ), )); - genesis_config_default_fields.push_str(&format!(" + genesis_config_default_fields.push_str(&format!( + " {name}: {default},", name = field.name, default = to_cleaned_string(&field.default), )); } - genesis_config = format!(" + genesis_config = format!( + " #[pallet::genesis_config] pub struct GenesisConfig{genesis_config_decl_gen} // TODO_MAYBE_WHERE_CLAUSE @@ -147,16 +128,18 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { genesis_config_use_gen = genesis_config_use_gen, ); - let genesis_config_build = genesis_config_builder_def.blocks.iter() - .fold(String::new(), |res, block| { - format!("{} + let genesis_config_build = + genesis_config_builder_def.blocks.iter().fold(String::new(), |res, block| { + format!( + "{} {}", res, to_cleaned_string(block), ) }); - 
genesis_build = format!(" + genesis_build = format!( + " #[pallet::genesis_build] impl{impl_gen} GenesisBuild{use_gen} for GenesisConfig{genesis_config_use_gen} // TODO_MAYBE_WHERE_CLAUSE @@ -176,7 +159,8 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { let storage_vis = convert_vis(&line.visibility); let getter = if let Some(getter) = &line.getter { - format!(" + format!( + " #[pallet::getter(fn {getter})]", getter = getter ) @@ -186,9 +170,12 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { let value_type = &line.value_type; - let default_value_type_value = line.default_value.as_ref() + let default_value_type_value = line + .default_value + .as_ref() .map(|default_expr| { - format!(" + format!( + " #[pallet::type_value] {storage_vis} fn DefaultFor{name} /* TODO_MAYBE_GENERICS */ () -> {value_type} {{ {default_expr} @@ -212,13 +199,16 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { ", ValueQuery" }; - let comma_default_value_getter_name = line.default_value.as_ref() + let comma_default_value_getter_name = line + .default_value + .as_ref() .map(|_| format!(", DefaultFor{}", line.name)) .unwrap_or_else(String::new); let typ = match &line.storage_type { StorageLineTypeDef::Map(map) => { - format!("StorageMap<_, {hasher}, {key}, {value_type}{comma_query_kind}\ + format!( + "StorageMap<_, {hasher}, {key}, {value_type}{comma_query_kind}\ {comma_default_value_getter_name}>", hasher = &map.hasher.to_storage_hasher_struct(), key = to_cleaned_string(&map.key), @@ -228,7 +218,8 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { ) }, StorageLineTypeDef::DoubleMap(double_map) => { - format!("StorageDoubleMap<_, {hasher1}, {key1}, {hasher2}, {key2}, {value_type}\ + format!( + "StorageDoubleMap<_, {hasher1}, {key1}, {hasher2}, {key2}, {value_type}\ {comma_query_kind}{comma_default_value_getter_name}>", hasher1 = double_map.hasher1.to_storage_hasher_struct(), key1 = to_cleaned_string(&double_map.key1), @@ -240,16 +231,18 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { ) }, StorageLineTypeDef::NMap(map) => { - format!("StorageNMap<_, {keygen}, {value_type}{comma_query_kind}\ + format!( + "StorageNMap<_, {keygen}, {value_type}{comma_query_kind}\ {comma_default_value_getter_name}>", keygen = map.to_keygen_struct(&def.hidden_crate), value_type = to_cleaned_string(&value_type), comma_query_kind = comma_query_kind, comma_default_value_getter_name = comma_default_value_getter_name, ) - } + }, StorageLineTypeDef::Simple(_) => { - format!("StorageValue<_, {value_type}{comma_query_kind}\ + format!( + "StorageValue<_, {value_type}{comma_query_kind}\ {comma_default_value_getter_name}>", value_type = to_cleaned_string(&value_type), comma_query_kind = comma_query_kind, @@ -265,7 +258,8 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { "" }; - storages.push_str(&format!(" + storages.push_str(&format!( + " {default_value_type_value}{doc} #[pallet::storage]{getter} {storage_vis} type {name}{full_decl_gen} = {typ};{additional_comment}", @@ -276,21 +270,21 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { full_decl_gen = full_decl_gen, typ = typ, additional_comment = additional_comment, - doc = line.doc_attrs.iter() - .fold(String::new(), |mut res, attr| { - if let syn::Meta::NameValue(name_value) = attr { - if name_value.path.is_ident("doc") { - if let syn::Lit::Str(string) = &name_value.lit { - res = format!("{} + doc = line.doc_attrs.iter().fold(String::new(), |mut res, 
attr| { + if let syn::Meta::NameValue(name_value) = attr { + if name_value.path.is_ident("doc") { + if let syn::Lit::Str(string) = &name_value.lit { + res = format!( + "{} ///{}", - res, - string.value(), - ); - } + res, + string.value(), + ); } } - res - }), + } + res + }), )); } @@ -308,7 +302,8 @@ pub fn maybe_print_pallet_upgrade(def: &super::DeclStorageDefExt) { "" }; - println!(" + println!( + " // Template for pallet upgrade for {pallet_name} pub use pallet::*; diff --git a/substrate/frame/support/procedural/src/storage/storage_info.rs b/substrate/frame/support/procedural/src/storage/storage_info.rs index c7707f6cb724be564d703ea9a5b5a746adc2d08d..844896409f85184845083d5ed975ef8fb62e747c 100644 --- a/substrate/frame/support/procedural/src/storage/storage_info.rs +++ b/substrate/frame/support/procedural/src/storage/storage_info.rs @@ -17,9 +17,9 @@ //! Implementation of trait `StorageInfoTrait` on module structure. +use super::DeclStorageDefExt; use proc_macro2::TokenStream; use quote::quote; -use super::DeclStorageDefExt; pub fn impl_storage_info(def: &DeclStorageDefExt) -> TokenStream { let scrate = &def.hidden_crate; diff --git a/substrate/frame/support/procedural/src/storage/storage_struct.rs b/substrate/frame/support/procedural/src/storage/storage_struct.rs index 3b182983cd4eae26d00c382f49e888379a940a47..b318225681c1d9094991658341d7f7c63f8d2edb 100644 --- a/substrate/frame/support/procedural/src/storage/storage_struct.rs +++ b/substrate/frame/support/procedural/src/storage/storage_struct.rs @@ -17,16 +17,15 @@ //! Implementation of storage structures and implementation of storage traits on them. -use proc_macro2::{TokenStream, Ident, Span}; +use super::{instance_trait::INHERENT_INSTANCE_NAME, DeclStorageDefExt, StorageLineTypeDef}; +use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use super::{ - DeclStorageDefExt, StorageLineTypeDef, - instance_trait::INHERENT_INSTANCE_NAME, -}; fn from_optional_value_to_query(is_option: bool, default: &Option<syn::Expr>) -> TokenStream { - let default = default.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); + let default = default + .as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); if !is_option { // raw type case @@ -40,10 +39,10 @@ fn from_optional_value_to_query(is_option: bool, default: &Option<syn::Expr>) -> fn from_query_to_optional_value(is_option: bool) -> TokenStream { if !is_option { // raw type case - quote!( Some(v) ) + quote!(Some(v)) } else { // Option<> type case - quote!( v ) + quote!(v) } } @@ -52,7 +51,6 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { let mut impls = TokenStream::new(); for line in &def.storage_lines { - // Propagate doc attributes.
let attrs = &line.doc_attrs; @@ -60,7 +58,8 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { let optional_storage_runtime_comma = &line.optional_storage_runtime_comma; let optional_storage_runtime_bound_comma = &line.optional_storage_runtime_bound_comma; let optional_storage_where_clause = &line.optional_storage_where_clause; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; let optional_instance_bound = &def.optional_instance_bound; let optional_instance = &def.optional_instance; let name = &line.name; @@ -87,10 +86,8 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()) }; - let storage_name_bstr = syn::LitByteStr::new( - line.name.to_string().as_ref(), - line.name.span() - ); + let storage_name_bstr = + syn::LitByteStr::new(line.name.to_string().as_ref(), line.name.span()); let storage_generator_trait = &line.storage_generator_trait; let storage_struct = &line.storage_struct; @@ -242,7 +239,7 @@ pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { } } ) - } + }, }; let max_values = if let Some(max_values) = &line.max_values { diff --git a/substrate/frame/support/procedural/src/storage/store_trait.rs b/substrate/frame/support/procedural/src/storage/store_trait.rs index 18adadbc61050c2296b02db5d867b70b7e52d4e4..7dde92cf9a75d51031089c7e787ad9a38d88a916 100644 --- a/substrate/frame/support/procedural/src/storage/store_trait.rs +++ b/substrate/frame/support/procedural/src/storage/store_trait.rs @@ -17,26 +17,26 @@ //! Declaration of store trait and implementation on module structure. +use super::DeclStorageDefExt; use proc_macro2::TokenStream; use quote::quote; -use super::DeclStorageDefExt; pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let decl_store_items = def.storage_lines.iter() - .map(|sline| &sline.name) - .fold(TokenStream::new(), |mut items, name| { + let decl_store_items = def.storage_lines.iter().map(|sline| &sline.name).fold( + TokenStream::new(), + |mut items, name| { items.extend(quote!(type #name;)); items - }); + }, + ); - let impl_store_items = def.storage_lines.iter() - .fold(TokenStream::new(), |mut items, line| { - let name = &line.name; - let storage_struct = &line.storage_struct; + let impl_store_items = def.storage_lines.iter().fold(TokenStream::new(), |mut items, line| { + let name = &line.name; + let storage_struct = &line.storage_struct; - items.extend(quote!(type #name = #storage_struct;)); - items - }); + items.extend(quote!(type #name = #storage_struct;)); + items + }); let visibility = &def.visibility; let store_trait = &def.store_trait; diff --git a/substrate/frame/support/procedural/src/transactional.rs b/substrate/frame/support/procedural/src/transactional.rs index 6ef26834cf024476f2544c0178992b6eb43e2431..403f1cd02bac729db74e3e1bba6010b90bbd72f6 100644 --- a/substrate/frame/support/procedural/src/transactional.rs +++ b/substrate/frame/support/procedural/src/transactional.rs @@ -15,10 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use frame_support_procedural_tools::generate_crate_access_2018; use proc_macro::TokenStream; use quote::quote; use syn::{ItemFn, Result}; -use frame_support_procedural_tools::generate_crate_access_2018; pub fn transactional(_attr: TokenStream, input: TokenStream) -> Result<TokenStream> { let ItemFn { attrs, vis, sig, block } = syn::parse(input)?; diff --git a/substrate/frame/support/procedural/tools/derive/src/lib.rs b/substrate/frame/support/procedural/tools/derive/src/lib.rs index 15394e0c559d443e62c02adfbd8e596f09077363..792210589560857b1976132634a28117da3d3a56 100644 --- a/substrate/frame/support/procedural/tools/derive/src/lib.rs +++ b/substrate/frame/support/procedural/tools/derive/src/lib.rs @@ -23,14 +23,14 @@ use proc_macro::TokenStream; use proc_macro2::Span; -use syn::parse_macro_input; use quote::quote; +use syn::parse_macro_input; pub(crate) fn fields_idents( fields: impl Iterator<Item = syn::Field>, ) -> impl Iterator<Item = TokenStream> { fields.enumerate().map(|(ix, field)| { - field.ident.map(|i| quote!{#i}).unwrap_or_else(|| { + field.ident.map(|i| quote! {#i}).unwrap_or_else(|| { let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site()); quote!( #f_ix ) }) @@ -42,10 +42,7 @@ pub(crate) fn fields_access( fields: impl Iterator<Item = syn::Field>, ) -> impl Iterator<Item = TokenStream> { fields.enumerate().map(|(ix, field)| { field.ident.map(|i| quote!( #i )).unwrap_or_else(|| { - let f_ix: syn::Index = syn::Index { - index: ix as u32, - span: Span::call_site(), - }; + let f_ix: syn::Index = syn::Index { index: ix as u32, span: Span::call_site() }; quote!( #f_ix ) }) }) @@ -64,15 +61,10 @@ pub fn derive_parse(input: TokenStream) -> TokenStream { } fn derive_parse_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. - } = input; + let syn::ItemStruct { ident, generics, fields, .. } = input; let field_names = { let name = fields_idents(fields.iter().map(Clone::clone)); - quote!{ + quote! { #( #name, )* @@ -110,12 +102,7 @@ pub fn derive_totokens(input: TokenStream) -> TokenStream { } fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. - } = input; + let syn::ItemStruct { ident, generics, fields, .. } = input; let fields = fields_access(fields.iter().map(Clone::clone)); let tokens = quote! { @@ -133,12 +120,7 @@ fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream { } fn derive_totokens_enum(input: syn::ItemEnum) -> TokenStream { - let syn::ItemEnum { - ident, - generics, - variants, - .. - } = input; + let syn::ItemEnum { ident, generics, variants, ..
} = input; let variants = variants.iter().map(|v| { let v_ident = v.ident.clone(); let fields_build = if v.fields.iter().count() > 0 { diff --git a/substrate/frame/support/procedural/tools/src/lib.rs b/substrate/frame/support/procedural/tools/src/lib.rs index 64f21d66391c22cad1d4d84d9e1c2d5e61a71aaf..19242db4594c0318d1e7b0e8a9836e696a74aca2 100644 --- a/substrate/frame/support/procedural/tools/src/lib.rs +++ b/substrate/frame/support/procedural/tools/src/lib.rs @@ -23,13 +23,13 @@ pub use frame_support_procedural_tools_derive::*; use proc_macro_crate::{crate_name, FoundCrate}; -use syn::parse::Error; use quote::quote; +use syn::parse::Error; pub mod syn_ext; // FIXME #1569, remove the following functions, which are copied from sp-api-macros -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::Ident; fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident { @@ -39,7 +39,7 @@ fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident { /// Generates the access to the `frame-support` crate. pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - quote::quote!( frame_support ) + quote::quote!(frame_support) } else { let mod_name = generate_hidden_includes_mod_name(unique_id); quote::quote!( self::#mod_name::hidden_include ) @@ -55,12 +55,8 @@ pub fn generate_crate_access_2018(def_crate: &str) -> Result<Ident, Error> let name = def_crate.to_string().replace("-", "_"); Ok(syn::Ident::new(&name, Span::call_site())) }, - Ok(FoundCrate::Name(name)) => { - Ok(Ident::new(&name, Span::call_site())) - }, - Err(e) => { - Err(Error::new(Span::call_site(), e)) - } + Ok(FoundCrate::Name(name)) => Ok(Ident::new(&name, Span::call_site())), + Err(e) => Err(Error::new(Span::call_site(), e)), } } @@ -82,7 +78,7 @@ pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream Err(e) => { let err = Error::new(Span::call_site(), e).to_compile_error(); quote!( #err ) - } + }, } } diff --git a/substrate/frame/support/procedural/tools/src/syn_ext.rs b/substrate/frame/support/procedural/tools/src/syn_ext.rs index 36bd03fed1bef259cded84b79d30a8fa53bf3017..a9e9ef573985f62e5c63d88fae6416b203b702a7 100644 --- a/substrate/frame/support/procedural/tools/src/syn_ext.rs +++ b/substrate/frame/support/procedural/tools/src/syn_ext.rs @@ -19,11 +19,15 @@ //! Extension to syn types, mainly for parsing // end::description[] -use syn::{visit::{Visit, self}, parse::{Parse, ParseStream, Result}, Ident}; +use frame_support_procedural_tools_derive::{Parse, ToTokens}; use proc_macro2::{TokenStream, TokenTree}; use quote::ToTokens; use std::iter::once; -use frame_support_procedural_tools_derive::{ToTokens, Parse}; +use syn::{ parse::{Parse, ParseStream, Result}, visit::{self, Visit}, Ident, }; /// Stop parsing here, taking the remaining tokens as content. /// Warning: duplicates (part of) the stream. @@ -35,7 +39,6 @@ pub struct StopParse { // inner macro really dependent on syn naming convention, do not export macro_rules! groups_impl { ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { - #[derive(Debug)] pub struct $name